From 43cd8f601d4e225fde3dd06d234d759ab8476c59 Mon Sep 17 00:00:00 2001
From: Atharva Rasane
Date: Sat, 1 Jun 2024 08:15:08 +0530
Subject: [PATCH] AI-driven Watermarking Technique for Safeguarding Text Integrity

---
 .../00_myst_template/Algorithm-Encoding.png   | Bin 0 -> 26717 bytes
 .../00_myst_template/BERT_WATER_MARKING.ipynb | 100348 +++++++++++++++
 .../00_myst_template/Correlation_Matrix.png   | Bin 0 -> 66996 bytes
 .../00_myst_template/Dataset.png              | Bin 0 -> 320047 bytes
 .../Distribution_of_P-value.png               | Bin 0 -> 65685 bytes
 .../Distribution_of_average_others.png        | Bin 0 -> 121838 bytes
 .../Distribution_of_highest_ratio.png         | Bin 0 -> 103205 bytes
 .../Distribution_of_t-statistics.png          | Bin 0 -> 71375 bytes
 .../00_myst_template/Results.csv              | 4001 +
 .../00_myst_template/banner.png               | Bin 0 -> 506825 bytes
 .../atharva_rasane/00_myst_template/main.md   | 788 +
 .../atharva_rasane/00_myst_template/mybib.bib | 238 +
 .../atharva_rasane/00_myst_template/myst.yml  | 57 +
 13 files changed, 105432 insertions(+)
 create mode 100644 papers/atharva_rasane/00_myst_template/Algorithm-Encoding.png
 create mode 100644 papers/atharva_rasane/00_myst_template/BERT_WATER_MARKING.ipynb
 create mode 100644 papers/atharva_rasane/00_myst_template/Correlation_Matrix.png
 create mode 100644 papers/atharva_rasane/00_myst_template/Dataset.png
 create mode 100644 papers/atharva_rasane/00_myst_template/Distribution_of_P-value.png
 create mode 100644 papers/atharva_rasane/00_myst_template/Distribution_of_average_others.png
 create mode 100644 papers/atharva_rasane/00_myst_template/Distribution_of_highest_ratio.png
 create mode 100644 papers/atharva_rasane/00_myst_template/Distribution_of_t-statistics.png
 create mode 100644 papers/atharva_rasane/00_myst_template/Results.csv
 create mode 100644 papers/atharva_rasane/00_myst_template/banner.png
 create mode 100644 papers/atharva_rasane/00_myst_template/main.md
 create mode 100644 papers/atharva_rasane/00_myst_template/mybib.bib
 create mode 100644 papers/atharva_rasane/00_myst_template/myst.yml

diff --git a/papers/atharva_rasane/00_myst_template/Algorithm-Encoding.png b/papers/atharva_rasane/00_myst_template/Algorithm-Encoding.png
new file mode 100644
index 0000000000000000000000000000000000000000..8baf14e245db5b0bf2098e4626015e5673e35829
GIT binary patch
literal 26717
[base64-encoded binary image data for Algorithm-Encoding.png (26717 bytes) omitted]

literal 0
HcmV?d00001

diff --git a/papers/atharva_rasane/00_myst_template/BERT_WATER_MARKING.ipynb
b/papers/atharva_rasane/00_myst_template/BERT_WATER_MARKING.ipynb new file mode 100644 index 0000000000..909f18b65c --- /dev/null +++ b/papers/atharva_rasane/00_myst_template/BERT_WATER_MARKING.ipynb @@ -0,0 +1,100348 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "Zbc6iyyjRDhh", + "outputId": "c51037cc-9e81-4255-d3f7-e22259c2f78a" + }, + "outputs": [], + "source": [ + "!pip -q install langchain huggingface_hub transformers sentence_transformers accelerate bitsandbytes" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "id": "OaBDymOCRId8" + }, + "outputs": [], + "source": [ + "import os\n", + "os.environ['HUGGINGFACEHUB_API_TOKEN'] = ''" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "id": "4dhYR_6WRLN2" + }, + "outputs": [], + "source": [ + "# text1 = \"A common use case when generating images is to generate a batch of images, select one image and improve it with a better, more detailed prompt in a second run. To do this, one needs to make each generated image of the batch deterministic. Images are generated by denoising gaussian random noise which can be instantiated by passing a torch generator.\"\n", + "text1 = \"\"\"\n", + "Once upon a time there was a dear little girl who was loved by every one who looked at her, but most of all by her grandmother, and there was nothing that she would not have given to the child. Once she gave her a little cap of red velvet, which suited her so well that she would never wear anything else. So she was always called Little Red Riding Hood.\n", + "\n", + "One day her mother said to her, \"Come, Little Red Riding Hood, here is a piece of cake and a bottle of wine. Take them to your grandmother, she is ill and weak, and they will do her good. Set out before it gets hot, and when you are going, walk nicely and quietly and do not run off the path, or you may fall and break the bottle, and then your grandmother will get nothing. And when you go into her room, don't forget to say, good-morning, and don't peep into every corner before you do it.\"\n", + "\n", + "I will take great care, said Little Red Riding Hood to her mother, and gave her hand on it.\n", + "\n", + "The grandmother lived out in the wood, half a league from the village, and just as Little Red Riding Hood entered the wood, a wolf met her. Little Red Riding Hood did not know what a wicked creature he was, and was not at all afraid of him.\n", + "\n", + "\"Good-day, Little Red Riding Hood,\" said he.\n", + "\n", + "\"Thank you kindly, wolf.\"\n", + "\n", + "\"Whither away so early, Little Red Riding Hood?\"\n", + "\n", + "\"To my grandmother's.\"\n", + "\n", + "\"What have you got in your apron?\"\n", + "\n", + "\"Cake and wine. Yesterday was baking-day, so poor sick grandmother is to have something good, to make her stronger.\"\n", + "\n", + "\"Where does your grandmother live, Little Red Riding Hood?\"\n", + "\n", + "\"A good quarter of a league farther on in the wood. Her house stands under the three large oak-trees, the nut-trees are just below. You surely must know it,\" replied Little Red Riding Hood.\n", + "\n", + "The wolf thought to himself, \"What a tender young creature. What a nice plump mouthful, she will be better to eat than the old woman. 
I must act craftily, so as to catch both.\" So he walked for a short time by the side of Little Red Riding Hood, and then he said, \"see Little Red Riding Hood, how pretty the flowers are about here. Why do you not look round. I believe, too, that you do not hear how sweetly the little birds are singing. You walk gravely along as if you were going to school, while everything else out here in the wood is merry.\"\n", + "\n", + "Little Red Riding Hood raised her eyes, and when she saw the sunbeams dancing here and there through the trees, and pretty flowers growing everywhere, she thought, suppose I take grandmother a fresh nosegay. That would please her too. It is so early in the day that I shall still get there in good time. And so she ran from the path into the wood to look for flowers. And whenever she had picked one, she fancied that she saw a still prettier one farther on, and ran after it, and so got deeper and deeper into the wood.\n", + "\n", + "Meanwhile the wolf ran straight to the grandmother's house and knocked at the door.\n", + "\n", + "\"Who is there?\"\n", + "\n", + "\"Little Red Riding Hood,\" replied the wolf. \"She is bringing cake and wine. Open the door.\"\n", + "\n", + "\"Lift the latch,\" called out the grandmother, \"I am too weak, and cannot get up.\"\n", + "\n", + "The wolf lifted the latch, the door sprang open, and without saying a word he went straight to the grandmother's bed, and devoured her. Then he put on her clothes, dressed himself in her cap, laid himself in bed and drew the curtains.\n", + "\n", + "Little Red Riding Hood, however, had been running about picking flowers, and when she had gathered so many that she could carry no more, she remembered her grandmother, and set out on the way to her.\n", + "\n", + "She was surprised to find the cottage-door standing open, and when she went into the room, she had such a strange feeling that she said to herself, oh dear, how uneasy I feel to-day, and at other times I like being with grandmother so much.\n", + "\n", + "She called out, \"Good morning,\" but received no answer. So she went to the bed and drew back the curtains. There lay her grandmother with her cap pulled far over her face, and looking very strange.\n", + "\n", + "\"Oh, grandmother,\" she said, \"what big ears you have.\"\n", + "\n", + "\"The better to hear you with, my child,\" was the reply.\n", + "\n", + "\"But, grandmother, what big eyes you have,\" she said.\n", + "\n", + "\"The better to see you with, my dear.\"\n", + "\n", + "\"But, grandmother, what large hands you have.\"\n", + "\n", + "\"The better to hug you with.\"\n", + "\n", + "\"Oh, but, grandmother, what a terrible big mouth you have.\"\n", + "\n", + "\"The better to eat you with.\"\n", + "\n", + "And scarcely had the wolf said this, than with one bound he was out of bed and swallowed up Little Red Riding Hood.\n", + "\n", + "When the wolf had appeased his appetite, he lay down again in the bed, fell asleep and began to snore very loud. The huntsman was just passing the house, and thought to himself, how the old woman is snoring. I must just see if she wants anything.\n", + "\n", + "So he went into the room, and when he came to the bed, he saw that the wolf was lying in it. \"Do I find you here, you old sinner,\" said he. 
\"I have long sought you.\"\n", + "\n", + "Then just as he was going to fire at him, it occurred to him that the wolf might have devoured the grandmother, and that she might still be saved, so he did not fire, but took a pair of scissors, and began to cut open the stomach of the sleeping wolf.\n", + "\n", + "When he had made two snips, he saw the Little Red Riding Hood shining, and then he made two snips more, and the little girl sprang out, crying, \"Ah, how frightened I have been. How dark it was inside the wolf.\"\n", + "\n", + "And after that the aged grandmother came out alive also, but scarcely able to breathe. Little Red Riding Hood, however, quickly fetched great stones with which they filled the wolf's belly, and when he awoke, he wanted to run away, but the stones were so heavy that he collapsed at once, and fell dead.\n", + "\n", + "Then all three were delighted. The huntsman drew off the wolf's skin and went home with it. The grandmother ate the cake and drank the wine which Little Red Riding Hood had brought, and revived, but Little Red Riding Hood thought to herself, as long as I live, I will never by myself leave the path, to run into the wood, when my mother has forbidden me to do so.It is also related that once when Little Red Riding Hood was again taking cakes to the old grandmother, another wolf spoke to her, and tried to entice her from the path. Little Red Riding Hood, however, was on her guard, and went straight forward on her way, and told her grandmother that she had met the wolf, and that he had said good-morning to her, but with such a wicked look in his eyes, that if they had not been on the public road she was certain he would have eaten her up. \"Well,\" said the grandmother, \"we will shut the door, that he may not come in.\" Soon afterwards the wolf knocked, and cried, \"open the door, grandmother, I am Little Red Riding Hood, and am bringing you some cakes.\" But they did not speak, or open the door, so the grey-beard stole twice or thrice round the house, and at last jumped on the roof, intending to wait until Little Red Riding Hood went home in the evening, and then to steal after her and devour her in the darkness. But the grandmother saw what was in his thoughts. In front of the house was a great stone trough, so she said to the child, take the pail, Little Red Riding Hood. I made some sausages yesterday, so carry the water in which I boiled them to the trough. Little Red Riding Hood carried until the great trough was quite full. Then the smell of the sausages reached the wolf, and he sniffed and peeped down, and at last stretched out his neck so far that he could no longer keep his footing and began to slip, and slipped down from the roof straight into the great trough, and was drowned. But Little Red Riding Hood went joyously home, and no one ever did anything to harm her again.\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "id": "4z1BCY7gbGkG" + }, + "outputs": [], + "source": [ + "text1 = \"Quantum computing is a rapidly evolving field that leverages the principles of quantum mechanics to perform computations that are infeasible for classical computers. Unlike classical computers, which use bits as the fundamental unit of information, quantum computers use quantum bits or qubits. 
Qubits can exist in multiple states simultaneously due to the principles of superposition and entanglement, providing a significant advantage in solving complex computational problems.\"" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 472 + }, + "id": "z_iG32CAERAp", + "outputId": "c652b092-834c-4802-b7bd-d495b94b5744" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "data": { + "text/plain": [ + "'Quantum computing is a rapidly evolving field that leverages the principles of quantum mechanics to perform computations that are impossible for classical computers. Unlike quantum computers, which use bits as the fundamental unit of , quantum computers use quantum bits or qubits. Qubits can exist in multiple states simultaneously according to the principles of symmetry and entanglement, providing a significant advantage in solving complex mathematical problems.'" + ] + }, + "execution_count": 30, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from transformers import pipeline, AutoTokenizer, AutoModelForMaskedLM\n", + "import torch\n", + "\n", + "def watermark_text(text, model_name=\"bert-base-uncased\", offset=0):\n", + " # Clean and split the input text\n", + " text = \" \".join(text.split())\n", + " words = text.split()\n", + "\n", + " # Replace every fifth word with [MASK], starting from the offset\n", + " for i in range(offset, len(words)):\n", + " if (i + 1 - offset) % 5 == 0:\n", + " words[i] = '[MASK]'\n", + "\n", + " # Initialize the tokenizer and model, move to GPU if available\n", + " device = 0 if torch.cuda.is_available() else -1\n", + " tokenizer = AutoTokenizer.from_pretrained(model_name)\n", + " model = AutoModelForMaskedLM.from_pretrained(model_name).to(device)\n", + "\n", + " # Initialize the fill-mask pipeline\n", + " classifier = pipeline(\"fill-mask\", model=model, tokenizer=tokenizer, device=device)\n", + "\n", + " # Make a copy of the words list to modify it\n", + " watermarked_words = words.copy()\n", + "\n", + " # Process the text in chunks\n", + " for i in range(offset, len(words), 5):\n", + " chunk = \" \".join(watermarked_words[:i+9])\n", + " if '[MASK]' in chunk:\n", + " try:\n", + " tempd = classifier(chunk)\n", + " except Exception as e:\n", + " print(f\"Error processing chunk '{chunk}': {e}\")\n", + " continue\n", + "\n", + " if tempd:\n", + " templ = tempd[0]\n", + " temps = templ['token_str']\n", + " watermarked_words[i+4] = temps.split()[0]\n", + " # print(\"Done \", i + 1, \"th word\")\n", + "\n", + " # Output the results\n", + " # print(\"Original Text:\")\n", + " # print(text)\n", + " # print(\"Watermark Areas:\")\n", + " # print(\" \".join(words))\n", + " # print(\"Watermarked Text:\")\n", 
+ " # print(\" \".join(watermarked_words))\n", + " return \" \".join(watermarked_words)\n", + "\n", + "# Example usage\n", + "text = \"Quantum computing is a rapidly evolving field that leverages the principles of quantum mechanics to perform computations that are infeasible for classical computers. Unlike classical computers, which use bits as the fundamental unit of information, quantum computers use quantum bits or qubits. Qubits can exist in multiple states simultaneously due to the principles of superposition and entanglement, providing a significant advantage in solving complex computational problems.\"\n", + "watermark_text(text, offset=0)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "Im0WJEePMfFD", + "outputId": "921e03a9-d449-47eb-903b-c297327e6011" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{0: 0.5384615384615384, 1: 0.6153846153846154, 2: 0.5833333333333334, 3: 0.6666666666666666, 4: 0.5833333333333334}\n" + ] + } + ], + "source": [ + "from transformers import pipeline, AutoTokenizer, AutoModelForMaskedLM\n", + "import torch\n", + "\n", + "def watermark_text_and_calculate_matches(text, model_name=\"bert-base-uncased\", max_offset=5):\n", + " # Clean and split the input text\n", + " text = \" \".join(text.split())\n", + " words = text.split()\n", + "\n", + " # Initialize the tokenizer and model, move to GPU if available\n", + " device = 0 if torch.cuda.is_available() else -1\n", + " tokenizer = AutoTokenizer.from_pretrained(model_name)\n", + " model = AutoModelForMaskedLM.from_pretrained(model_name).to(device)\n", + "\n", + " # Initialize the fill-mask pipeline\n", + " classifier = pipeline(\"fill-mask\", model=model, tokenizer=tokenizer, device=device)\n", + "\n", + " # Dictionary to store match ratios for each offset\n", + " match_ratios = {}\n", + "\n", + " # Loop over each offset\n", + " for offset in range(max_offset):\n", + " # Replace every fifth word with [MASK], starting from the offset\n", + " modified_words = words.copy()\n", + " for i in range(offset, len(modified_words)):\n", + " if (i + 1 - offset) % 5 == 0:\n", + " modified_words[i] = '[MASK]'\n", + "\n", + " # Make a copy of the modified words list to work on\n", + " watermarked_words = modified_words.copy()\n", + " total_replacements = 0\n", + " total_matches = 0\n", + "\n", + " # Process the text in chunks\n", + " for i in range(offset, len(modified_words), 5):\n", + " chunk = \" \".join(watermarked_words[:i+9])\n", + " if '[MASK]' in chunk:\n", + " try:\n", + " tempd = classifier(chunk)\n", + " except Exception as e:\n", + " print(f\"Error processing chunk '{chunk}': {e}\")\n", + " continue\n", + "\n", + " 
if tempd:\n", + " templ = tempd[0]\n", + " temps = templ['token_str']\n", + " original_word = words[i+4]\n", + " replaced_word = temps.split()[0]\n", + " watermarked_words[i+4] = replaced_word\n", + "\n", + " # Increment total replacements and matches\n", + " total_replacements += 1\n", + " if replaced_word == original_word:\n", + " total_matches += 1\n", + "\n", + " # Calculate the match ratio for the current offset\n", + " if total_replacements > 0:\n", + " match_ratio = total_matches / total_replacements\n", + " else:\n", + " match_ratio = 0\n", + "\n", + " match_ratios[offset] = match_ratio\n", + "\n", + " # Return the match ratios for each offset\n", + " return match_ratios\n", + "\n", + "# Example usage\n", + "text = \"Quantum computing is a rapidly evolving field that leverages the principles of quantum mechanics to perform computations that are infeasible for classical computers. Unlike classical computers, which use bits as the fundamental unit of information, quantum computers use quantum bits or qubits. Qubits can exist in multiple states simultaneously due to the principles of superposition and entanglement, providing a significant advantage in solving complex computational problems.\"\n", + "\n", + "# Calculate match ratios\n", + "match_ratios = watermark_text_and_calculate_matches(text, max_offset=5)\n", + "print(match_ratios)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "id": "L8Ht1GNUMgvv" + }, + "outputs": [], + "source": [ + "from scipy.stats import ttest_1samp\n", + "import numpy as np\n", + "\n", + "def check_significant_difference(match_ratios):\n", + " # Extract ratios into a list\n", + " ratios = list(match_ratios.values())\n", + "\n", + " # Find the highest ratio\n", + " highest_ratio = max(ratios)\n", + "\n", + " # Find the average of the other ratios\n", + " other_ratios = [r for r in ratios if r != highest_ratio]\n", + " average_other_ratios = np.mean(other_ratios)\n", + "\n", + " # Perform a t-test to compare the highest ratio to the average of the others\n", + " t_stat, p_value = ttest_1samp(other_ratios, highest_ratio)\n", + "\n", + " # Print the results\n", + " print(f\"Highest Match Ratio: {highest_ratio}\")\n", + " print(f\"Average of Other Ratios: {average_other_ratios}\")\n", + " print(f\"T-Statistic: {t_stat}\")\n", + " print(f\"P-Value: {p_value}\")\n", + "\n", + " # Determine if the difference is statistically significant (e.g., at the 0.05 significance level)\n", + " if p_value < 0.05:\n", + " print(\"The highest ratio is significantly different from the others.\")\n", + " else:\n", + " print(\"The highest ratio is not significantly different from the others.\")\n", + "\n", + " return [highest_ratio, average_other_ratios, t_stat, p_value]\n", + "\n", + "# Example usage\n", + "text = \"Quantum computing is a rapidly evolving field that leverages the principles of quantum mechanics to perform computations that are infeasible for classical computers. Unlike classical computers, which use bits as the fundamental unit of information, quantum computers use quantum bits or qubits. 
Qubits can exist in multiple states simultaneously due to the principles of superposition and entanglement, providing a significant advantage in solving complex computational problems.\"\n", + "# match_ratios = watermark_text_and_calculate_matches(text, max_offset=5)\n", + "# check_significant_difference(match_ratios)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "uZpox66dMgyW", + "outputId": "293a9391-11f3-4dae-953d-510297d6cc02" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Done 1 th word\n", + "Done 6 th word\n", + "Done 11 th word\n", + "Done 16 th word\n", + "Done 21 th word\n", + "Done 26 th word\n", + "Done 31 th word\n", + "Done 36 th word\n", + "Done 41 th word\n", + "Done 46 th word\n", + "Done 51 th word\n", + "Done 56 th word\n", + "Done 61 th word\n", + "Original Text:\n", + "Quantum computing is a rapidly evolving field that leverages the principles of quantum mechanics to perform computations that are infeasible for classical computers. Unlike classical computers, which use bits as the fundamental unit of information, quantum computers use quantum bits or qubits. Qubits can exist in multiple states simultaneously due to the principles of superposition and entanglement, providing a significant advantage in solving complex computational problems.\n", + "Watermark Areas:\n", + "Quantum computing is a [MASK] evolving field that leverages [MASK] principles of quantum mechanics [MASK] perform computations that are [MASK] for classical computers. Unlike [MASK] computers, which use bits [MASK] the fundamental unit of [MASK] quantum computers use quantum [MASK] or qubits. Qubits can [MASK] in multiple states simultaneously [MASK] to the principles of [MASK] and entanglement, providing a [MASK] advantage in solving complex [MASK] problems.\n", + "Watermarked Text:\n", + "Quantum computing is a rapidly evolving field that leverages the principles of quantum mechanics to perform computations that are impossible for classical computers. Unlike quantum computers, which use bits as the fundamental unit of , quantum computers use quantum bits or qubits. Qubits can exist in multiple states simultaneously according to the principles of symmetry and entanglement, providing a significant advantage in solving complex mathematical problems.\n", + "Original Text:\n", + "Quantum computing is a rapidly evolving field that leverages the principles of quantum mechanics to perform computations that are infeasible for classical computers. Unlike classical computers, which use bits as the fundamental unit of information, quantum computers use quantum bits or qubits. 
Qubits can exist in multiple states simultaneously due to the principles of superposition and entanglement, providing a significant advantage in solving complex computational problems.\n", + "\n", + "Modified Text:\n", + "Quantum computing is example a rapidly evolving field that leverages the principles of quantum mechanics to perform random computations that are impossible for classical computers. Unlike quantum computers, which use bits as the random insert fundamental unit of , quantum computers use quantum bits or qubits. Qubits can exist in multiple states simultaneously according random to the principles of symmetry and entanglement, providing a significant advantage in solving complex mathematical problems.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{0: 0.5714285714285714, 1: 0.5714285714285714, 2: 0.5384615384615384, 3: 0.38461538461538464, 4: 0.7692307692307693}\n", + "Highest Match Ratio: 0.7692307692307693\n", + "Average of Other Ratios: 0.5164835164835164\n", + "T-Statistic: -5.66220858504931\n", + "P-Value: 0.010908789440745323\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "data": { + "text/plain": [ + "[0.7692307692307693,\n", + " 0.5164835164835164,\n", + " -5.66220858504931,\n", + " 0.010908789440745323]" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import random\n", + "\n", + "def randomly_add_words(text, words_to_add, num_words_to_add):\n", + " # Clean and split the input text\n", + " text = \" \".join(text.split())\n", + " words = text.split()\n", + "\n", + " # Insert words randomly into the text\n", + " for _ in range(num_words_to_add):\n", + " # Choose a random position to insert the word\n", + " position = random.randint(0, len(words))\n", + " # Choose a random word to insert\n", + " word_to_insert = random.choice(words_to_add)\n", + " # Insert the word at the random position\n", + " words.insert(position, word_to_insert)\n", + "\n", + " # Join the list back into a string and return the modified text\n", + " modified_text = \" \".join(words)\n", + " return modified_text\n", + "\n", + "# Example usage\n", + "text = \"Quantum computing is a rapidly evolving field that leverages the principles of quantum mechanics to perform computations that are infeasible for classical computers. Unlike classical computers, which use bits as the fundamental unit of information, quantum computers use quantum bits or qubits. 
Qubits can exist in multiple states simultaneously due to the principles of superposition and entanglement, providing a significant advantage in solving complex computational problems.\"\n", + "words_to_add = [\"example\", \"test\", \"random\", \"insert\"]\n", + "num_words_to_add = 5\n", + "\n", + "# modified_text = randomly_add_words(text, words_to_add, num_words_to_add)\n", + "modified_text = randomly_add_words(watermark_text(text, offset=0), words_to_add, num_words_to_add)\n", + "print(\"Original Text:\")\n", + "print(text)\n", + "print(\"\\nModified Text:\")\n", + "print(modified_text)\n", + "\n", + "match_ratios = watermark_text_and_calculate_matches(modified_text, max_offset=5)\n", + "print(match_ratios)\n", + "check_significant_difference(match_ratios)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": { + "id": "W22siFz5Mg12" + }, + "outputs": [], + "source": [ + "texts = [\n", + " \"Artificial intelligence (AI) has seen remarkable advancements in recent years, transforming numerous industries. From healthcare to finance, AI technologies are being leveraged to improve efficiency and decision-making. In healthcare, AI algorithms are being used to analyze medical images, predict patient outcomes, and assist in surgery. Finance professionals are using AI for fraud detection, risk management, and algorithmic trading. Despite these advancements, AI also raises ethical concerns, particularly regarding bias and privacy. Ensuring that AI systems are transparent and fair is critical for their continued adoption and trust. As AI continues to evolve, it is essential to consider both its potential benefits and challenges.\",\n", + "\n", + " \"Climate change is one of the most pressing issues facing our planet today. Rising global temperatures, melting ice caps, and increasing frequency of extreme weather events are all indicators of this phenomenon. Scientists warn that without significant action to reduce greenhouse gas emissions, the effects of climate change will become more severe. Renewable energy sources such as solar, wind, and hydro power are being promoted as sustainable alternatives to fossil fuels. Additionally, individuals can make a difference by reducing their carbon footprint through actions like using public transportation, conserving energy, and supporting policies aimed at environmental protection.\",\n", + "\n", + " \"The field of biotechnology is revolutionizing medicine and agriculture. Advances in genetic engineering have enabled scientists to develop crops that are resistant to pests and diseases, as well as produce higher yields. In medicine, biotechnology is being used to create personalized treatments based on an individual's genetic makeup. This approach, known as precision medicine, aims to provide more effective and targeted therapies for various diseases. However, the rapid pace of biotechnological innovation also raises ethical and regulatory questions. It is crucial to balance the benefits of these technologies with the potential risks and ensure that they are used responsibly.\",\n", + "\n", + " \"Quantum computing is poised to revolutionize the world of computing. Unlike classical computers, which use bits to represent data as 0s and 1s, quantum computers use qubits, which can exist in multiple states simultaneously. This allows quantum computers to perform complex calculations much faster than their classical counterparts. Potential applications of quantum computing include cryptography, drug discovery, and optimization problems. 
However, building a practical and scalable quantum computer remains a significant challenge. Researchers are exploring various approaches, such as superconducting qubits and trapped ions, to overcome these hurdles and bring quantum computing closer to reality.\",\n", + "\n", + " \"The internet of things (IoT) is transforming the way we interact with the world around us. IoT refers to the network of interconnected devices that collect and exchange data. These devices range from smart home appliances to industrial sensors, and their applications are vast. In the home, IoT devices can automate tasks like adjusting the thermostat, turning off lights, and monitoring security systems. In industry, IoT is used to optimize supply chains, monitor equipment health, and improve safety. However, the proliferation of IoT devices also raises concerns about security and privacy. Ensuring that these devices are secure and that data is protected is essential for the continued growth of IoT.\",\n", + "\n", + " \"Renewable energy is gaining momentum as a viable solution to the world's energy needs. Solar, wind, and hydro power are among the most common forms of renewable energy, and they offer a sustainable alternative to fossil fuels. Solar power harnesses energy from the sun using photovoltaic cells, while wind power generates electricity through turbines. Hydropower uses the energy of flowing water to produce electricity. These technologies are being adopted at an increasing rate as countries seek to reduce their carbon emissions and transition to cleaner energy sources. The growth of renewable energy is not without challenges, including the need for improved energy storage solutions and the integration of these technologies into existing power grids.\",\n", + "\n", + " \"The rise of e-commerce has transformed the retail industry. Online shopping has become increasingly popular, offering consumers convenience and a wide range of products at their fingertips. Major e-commerce platforms like Amazon, Alibaba, and eBay have disrupted traditional brick-and-mortar stores, leading to significant changes in consumer behavior. The COVID-19 pandemic further accelerated the shift to online shopping, as lockdowns and social distancing measures limited in-person shopping. While e-commerce offers many benefits, it also presents challenges, such as the need for efficient logistics and concerns about data privacy. As the industry continues to evolve, companies are exploring new technologies like augmented reality and artificial intelligence to enhance the online shopping experience.\",\n", + "\n", + " \"Cybersecurity is a critical concern in today's digital age. With the increasing reliance on technology and the internet, the risk of cyberattacks has grown significantly. Cybercriminals use various methods, such as phishing, ransomware, and malware, to exploit vulnerabilities in systems and steal sensitive information. Organizations must implement robust cybersecurity measures to protect their data and infrastructure. This includes using encryption, multi-factor authentication, and regular security audits. Additionally, individuals can take steps to safeguard their personal information, such as using strong passwords and being cautious of suspicious emails. As cyber threats continue to evolve, staying informed and vigilant is essential for maintaining cybersecurity.\",\n", + "\n", + " \"The field of robotics is advancing rapidly, with applications ranging from manufacturing to healthcare. 
Industrial robots are used to automate repetitive tasks, improve precision, and increase efficiency in manufacturing processes. In healthcare, robots assist in surgeries, rehabilitation, and patient care. Social robots are being developed to provide companionship and support for the elderly and individuals with disabilities. The integration of artificial intelligence and machine learning has further enhanced the capabilities of robots, enabling them to perform complex tasks and adapt to new situations. However, the rise of robotics also raises ethical and societal questions, such as the impact on employment and the need for responsible development and use of these technologies.\",\n", + "\n", + " \"Space exploration has captured the imagination of humanity for centuries. Recent advancements in technology have made space missions more feasible and ambitious. Private companies like SpaceX and Blue Origin are playing a significant role in this new era of space exploration. SpaceX's successful launches and plans for Mars colonization have reignited interest in space travel. NASA and other space agencies are also focusing on missions to the Moon, Mars, and beyond. The development of new propulsion systems, space habitats, and life support technologies are critical for the success of these missions. While space exploration holds great promise, it also presents challenges, including the need for international cooperation, funding, and addressing the environmental impact of space activities.\",\n", + "\n", + " \"Climate change is driving the need for sustainable agriculture practices. Traditional farming methods often rely on chemical fertilizers and pesticides, which can harm the environment and human health. Sustainable agriculture aims to reduce the negative impact of farming by promoting practices that conserve resources, protect biodiversity, and improve soil health. Techniques such as crop rotation, cover cropping, and organic farming are being adopted by farmers worldwide. Additionally, advances in agricultural technology, such as precision farming and vertical farming, are helping to increase efficiency and reduce waste. By embracing sustainable agriculture, we can ensure food security for future generations while protecting the planet.\",\n", + "\n", + " \"The rise of electric vehicles (EVs) is transforming the automotive industry. EVs offer a cleaner and more sustainable alternative to traditional gasoline-powered vehicles, with lower emissions and reduced dependence on fossil fuels. Major automakers are investing heavily in EV technology, and the market for electric cars is growing rapidly. Advances in battery technology are improving the range and performance of EVs, making them more practical for everyday use. Governments around the world are also supporting the transition to electric vehicles through incentives, subsidies, and the development of charging infrastructure. While challenges remain, such as the need for widespread charging stations and the environmental impact of battery production, the future of transportation is increasingly electric.\",\n", + "\n", + " \"Artificial intelligence (AI) is transforming the field of education. AI-powered tools and platforms are being used to personalize learning, automate administrative tasks, and provide real-time feedback to students. Personalized learning systems use AI algorithms to analyze student performance and tailor instruction to individual needs. 
This approach can help improve student outcomes by addressing learning gaps and providing targeted support. AI is also being used to create adaptive assessments, intelligent tutoring systems, and virtual learning environments. While AI in education offers many benefits, it also raises questions about data privacy, the role of teachers, and the need for equitable access to technology. As AI continues to evolve, it has the potential to revolutionize the way we teach and learn.\",\n", + "\n", + " \"The field of renewable energy is experiencing significant growth as countries seek to reduce their carbon emissions and transition to cleaner energy sources. Solar, wind, and hydro power are among the most common forms of renewable energy, and they offer a sustainable alternative to fossil fuels. Solar power harnesses energy from the sun using photovoltaic cells, while wind power generates electricity through turbines. Hydropower uses the energy of flowing water to produce electricity. These technologies are being adopted at an increasing rate, driven by advancements in technology, falling costs, and supportive government policies. The growth of renewable energy is not without challenges, including the need for improved energy storage solutions and the integration of these technologies into existing power grids.\",\n", + "\n", + " \"The COVID-19 pandemic has had a profound impact on the world, affecting nearly every aspect of daily life. The pandemic has led to widespread illness, loss of life, and economic disruption. Healthcare systems have been stretched to their limits, and the need for effective treatments and vaccines has become paramount. Scientists and researchers have worked tirelessly to develop vaccines and treatments for COVID-19, leading to the rapid development and distribution of several effective vaccines. The pandemic has also highlighted the importance of public health measures, such as social distancing, mask-wearing, and hand hygiene. As the world continues to grapple with the pandemic, efforts to prevent future outbreaks and improve global health infrastructure are essential.\",\n", + "\n", + " \"The concept of smart cities is gaining traction as urban areas look for ways to improve efficiency, sustainability, and quality of life for residents. Smart cities leverage technology and data to optimize city services, such as transportation, energy, and waste management. For example, smart traffic management systems can reduce congestion and improve air quality by adjusting traffic signals in real-time based on traffic flow. Smart grids can enhance energy efficiency by balancing supply and demand and integrating renewable energy sources. Additionally, smart waste management systems use sensors to monitor waste levels and optimize collection routes. While smart cities offer many benefits, they also raise concerns about data privacy, cybersecurity, and the need for equitable access to technology.\",\n", + "\n", + " \"The field of biotechnology is revolutionizing medicine and agriculture. Advances in genetic engineering have enabled scientists to develop crops that are resistant to pests and diseases, as well as produce higher yields. In medicine, biotechnology is being used to create personalized treatments based on an individual's genetic makeup. This approach, known as precision medicine, aims to provide more effective and targeted therapies for various diseases. However, the rapid pace of biotechnological innovation also raises ethical and regulatory questions. 
It is crucial to balance the benefits of these technologies with the potential risks and ensure that they are used responsibly.\",\n", + "\n", + " \"The rise of renewable energy is transforming the global energy landscape. Solar, wind, and hydro power are among the most common forms of renewable energy, and they offer a sustainable alternative to fossil fuels. Solar power harnesses energy from the sun using photovoltaic cells, while wind power generates electricity through turbines. Hydropower uses the energy of flowing water to produce electricity. These technologies are being adopted at an increasing rate as countries seek to reduce their carbon emissions and transition to cleaner energy sources. The growth of renewable energy is not without challenges, including the need for improved energy storage solutions and the integration of these technologies into existing power grids.\",\n", + "\n", + " \"The field of cybersecurity is becoming increasingly important as our reliance on technology and the internet grows. Cyberattacks can have devastating consequences, including the theft of sensitive information, financial loss, and damage to an organization's reputation. Cybercriminals use various methods, such as phishing, ransomware, and malware, to exploit vulnerabilities in systems. Organizations must implement robust cybersecurity measures to protect their data and infrastructure. This includes using encryption, multi-factor authentication, and regular security audits. Additionally, individuals can take steps to safeguard their personal information, such as using strong passwords and being cautious of suspicious emails. As cyber threats continue to evolve, staying informed and vigilant is essential for maintaining cybersecurity.\",\n", + "\n", + " \"The rise of e-commerce has transformed the retail industry. Online shopping has become increasingly popular, offering consumers convenience and a wide range of products at their fingertips. Major e-commerce platforms like Amazon, Alibaba, and eBay have disrupted traditional brick-and-mortar stores, leading to significant changes in consumer behavior. The COVID-19 pandemic further accelerated the shift to online shopping, as lockdowns and social distancing measures limited in-person shopping. While e-commerce offers many benefits, it also presents challenges, such as the need for efficient logistics and concerns about data privacy. As the industry continues to evolve, companies are exploring new technologies like augmented reality and artificial intelligence to enhance the online shopping experience.\",\n", + "\n", + " \"Artificial intelligence (AI) is transforming the field of healthcare. AI-powered tools and platforms are being used to analyze medical images, predict patient outcomes, and assist in surgery. In radiology, AI algorithms can help detect abnormalities in medical images, such as tumors or fractures, with high accuracy. In predictive analytics, AI can analyze patient data to identify individuals at risk of developing certain conditions, allowing for early intervention and personalized treatment plans. AI is also being used in robotic surgery, where it can enhance precision and reduce the risk of complications. 
While AI in healthcare offers many benefits, it also raises questions about data privacy, the role of healthcare professionals, and the need for regulatory oversight.\",\n", + "\n", + " \"The field of renewable energy is experiencing significant growth as countries seek to reduce their carbon emissions and transition to cleaner energy sources. Solar, wind, and hydro power are among the most common forms of renewable energy, and they offer a sustainable alternative to fossil fuels. Solar power harnesses energy from the sun using photovoltaic cells, while wind power generates electricity through turbines. Hydropower uses the energy of flowing water to produce electricity. These technologies are being adopted at an increasing rate, driven by advancements in technology, falling costs, and supportive government policies. The growth of renewable energy is not without challenges, including the need for improved energy storage solutions and the integration of these technologies into existing power grids.\",\n", + "\n", + " \"The COVID-19 pandemic has had a profound impact on the world, affecting nearly every aspect of daily life. The pandemic has led to widespread illness, loss of life, and economic disruption. Healthcare systems have been stretched to their limits, and the need for effective treatments and vaccines has become paramount. Scientists and researchers have worked tirelessly to develop vaccines and treatments for COVID-19, leading to the rapid development and distribution of several effective vaccines. The pandemic has also highlighted the importance of public health measures, such as social distancing, mask-wearing, and hand hygiene. As the world continues to grapple with the pandemic, efforts to prevent future outbreaks and improve global health infrastructure are essential.\",\n", + "\n", + " \"The concept of smart cities is gaining traction as urban areas look for ways to improve efficiency, sustainability, and quality of life for residents. Smart cities leverage technology and data to optimize city services, such as transportation, energy, and waste management. For example, smart traffic management systems can reduce congestion and improve air quality by adjusting traffic signals in real-time based on traffic flow. Smart grids can enhance energy efficiency by balancing supply and demand and integrating renewable energy sources. Additionally, smart waste management systems use sensors to monitor waste levels and optimize collection routes. While smart cities offer many benefits, they also raise concerns about data privacy, cybersecurity, and the need for equitable access to technology.\",\n", + "\n", + " \"The field of biotechnology is revolutionizing medicine and agriculture. Advances in genetic engineering have enabled scientists to develop crops that are resistant to pests and diseases, as well as produce higher yields. In medicine, biotechnology is being used to create personalized treatments based on an individual's genetic makeup. This approach, known as precision medicine, aims to provide more effective and targeted therapies for various diseases. However, the rapid pace of biotechnological innovation also raises ethical and regulatory questions. It is crucial to balance the benefits of these technologies with the potential risks and ensure that they are used responsibly.\",\n", + "\n", + " \"The rise of renewable energy is transforming the global energy landscape. 
Solar, wind, and hydro power are among the most common forms of renewable energy, and they offer a sustainable alternative to fossil fuels. Solar power harnesses energy from the sun using photovoltaic cells, while wind power generates electricity through turbines. Hydropower uses the energy of flowing water to produce electricity. These technologies are being adopted at an increasing rate as countries seek to reduce their carbon emissions and transition to cleaner energy sources. The growth of renewable energy is not without challenges, including the need for improved energy storage solutions and the integration of these technologies into existing power grids.\",\n", + "\n", + " \"The field of cybersecurity is becoming increasingly important as our reliance on technology and the internet grows. Cyberattacks can have devastating consequences, including the theft of sensitive information, financial loss, and damage to an organization's reputation. Cybercriminals use various methods, such as phishing, ransomware, and malware, to exploit vulnerabilities in systems. Organizations must implement robust cybersecurity measures to protect their data and infrastructure. This includes using encryption, multi-factor authentication, and regular security audits. Additionally, individuals can take steps to safeguard their personal information, such as using strong passwords and being cautious of suspicious emails. As cyber threats continue to evolve, staying informed and vigilant is essential for maintaining cybersecurity.\",\n", + "\n", + " \"The rise of e-commerce has transformed the retail industry. Online shopping has become increasingly popular, offering consumers convenience and a wide range of products at their fingertips. Major e-commerce platforms like Amazon, Alibaba, and eBay have disrupted traditional brick-and-mortar stores, leading to significant changes in consumer behavior. The COVID-19 pandemic further accelerated the shift to online shopping, as lockdowns and social distancing measures limited in-person shopping. While e-commerce offers many benefits, it also presents challenges, such as the need for efficient logistics and concerns about data privacy. As the industry continues to evolve, companies are exploring new technologies like augmented reality and artificial intelligence to enhance the online shopping experience.\",\n", + "\n", + " \"Artificial intelligence (AI) is transforming the field of healthcare. AI-powered tools and platforms are being used to analyze medical images, predict patient outcomes, and assist in surgery. In radiology, AI algorithms can help detect abnormalities in medical images, such as tumors or fractures, with high accuracy. In predictive analytics, AI can analyze patient data to identify individuals at risk of developing certain conditions, allowing for early intervention and personalized treatment plans. AI is also being used in robotic surgery, where it can enhance precision and reduce the risk of complications. 
While AI in healthcare offers many benefits, it also raises questions about data privacy, the role of healthcare professionals, and the need for regulatory oversight.\",\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "aupVsU8ObJM1", + "outputId": "4f4120f4-34b1-47f6-88c5-8d0dc957edae" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Done 1 th word\n", + "Done 6 th word\n", + "Done 11 th word\n", + "Done 16 th word\n", + "Done 21 th word\n", + "Done 26 th word\n", + "Done 31 th word\n", + "Done 36 th word\n", + "Done 41 th word\n", + "Done 46 th word\n", + "Done 51 th word\n", + "Done 56 th word\n", + "Done 61 th word\n", + "Done 66 th word\n", + "Done 71 th word\n", + "Done 76 th word\n", + "Done 81 th word\n", + "Done 86 th word\n", + "Done 91 th word\n", + "Done 96 th word\n", + "Original Text:\n", + "Artificial intelligence (AI) has seen remarkable advancements in recent years, transforming numerous industries. From healthcare to finance, AI technologies are being leveraged to improve efficiency and decision-making. In healthcare, AI algorithms are being used to analyze medical images, predict patient outcomes, and assist in surgery. Finance professionals are using AI for fraud detection, risk management, and algorithmic trading. Despite these advancements, AI also raises ethical concerns, particularly regarding bias and privacy. Ensuring that AI systems are transparent and fair is critical for their continued adoption and trust. As AI continues to evolve, it is essential to consider both its potential benefits and challenges.\n", + "Watermark Areas:\n", + "Artificial intelligence (AI) has [MASK] remarkable advancements in recent [MASK] transforming numerous industries. From [MASK] to finance, AI technologies [MASK] being leveraged to improve [MASK] and decision-making. In healthcare, [MASK] algorithms are being used [MASK] analyze medical images, predict [MASK] outcomes, and assist in [MASK] Finance professionals are using [MASK] for fraud detection, risk [MASK] and algorithmic trading. Despite [MASK] advancements, AI also raises [MASK] concerns, particularly regarding bias [MASK] privacy. Ensuring that AI [MASK] are transparent and fair [MASK] critical for their continued [MASK] and trust. As AI [MASK] to evolve, it is [MASK] to consider both its [MASK] benefits and challenges.\n", + "Watermarked Text:\n", + "Artificial intelligence (AI) has made remarkable advancements in recent years transforming numerous industries. From manufacturing to finance, AI technologies are being leveraged to improve performance and decision-making. 
In healthcare, ai algorithms are being used to analyze medical images, predict patient outcomes, and assist in how Finance professionals are using them for fraud detection, risk management and algorithmic trading. Despite these advancements, AI also raises ethical concerns, particularly regarding bias and privacy. Ensuring that AI algorithms are transparent and fair is critical for their continued integrity and trust. As AI continues to evolve, it is important to consider both its potential benefits and challenges.\n", + "Original Text:\n", + "Artificial intelligence (AI) has seen remarkable advancements in recent years, transforming numerous industries. From healthcare to finance, AI technologies are being leveraged to improve efficiency and decision-making. In healthcare, AI algorithms are being used to analyze medical images, predict patient outcomes, and assist in surgery. Finance professionals are using AI for fraud detection, risk management, and algorithmic trading. Despite these advancements, AI also raises ethical concerns, particularly regarding bias and privacy. Ensuring that AI systems are transparent and fair is critical for their continued adoption and trust. As AI continues to evolve, it is essential to consider both its potential benefits and challenges.\n", + "\n", + "Modified Text:\n", + "Artificial intelligence (AI) has made remarkable advancements in recent years transforming numerous industries. From manufacturing to finance, AI technologies are being leveraged to improve performance and decision-making. In healthcare, ai algorithms are being used to analyze medical images, predict patient outcomes, random and assist in how Finance professionals are using them for fraud example detection, risk management and algorithmic trading. Despite these advancements, AI also raises ethical concerns, particularly regarding bias and privacy. Ensuring that example AI algorithms are transparent and fair test is critical for their continued integrity and trust. As AI continues to evolve, it is random important to consider both its potential benefits and challenges.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{0: 0.6190476190476191, 1: 0.3333333333333333, 2: 0.42857142857142855, 3: 0.2857142857142857, 4: 0.55}\n", + "Highest Match Ratio: 0.6190476190476191\n", + "Average of Other Ratios: 0.3994047619047619\n", + "T-Statistic: -3.765894344306259\n", + "P-Value: 0.032757613277666235\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "data": { + "text/plain": [ + "[0.6190476190476191,\n", + " 0.3994047619047619,\n", + " -3.765894344306259,\n", + " 0.032757613277666235]" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "text = texts[0]\n", + "words_to_add = [\"example\", \"test\", \"random\", \"insert\"]\n", + "num_words_to_add = 5\n", + "\n", + "# modified_text = randomly_add_words(text, words_to_add, num_words_to_add)\n", + "modified_text = randomly_add_words(watermark_text(text, offset=0), words_to_add, num_words_to_add)\n", + "print(\"Original Text:\")\n", + "print(text)\n", + "print(\"\\nModified Text:\")\n", + "print(modified_text)\n", + "\n", + "match_ratios = watermark_text_and_calculate_matches(modified_text, max_offset=5)\n", + "print(match_ratios)\n", + "check_significant_difference(match_ratios)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "YDaqULs0MeUI", + "outputId": "a278c87d-20c0-44f2-db3d-213dcd3b2bc0" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "___________________________________________________________________________________________________________________________\n", + "Doing 1\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Done 1 th word\n", + "Done 6 th word\n", + "Done 11 th word\n", + "Done 16 th word\n", + "Done 21 th word\n", + "Done 26 th word\n", + "Done 31 th word\n", + "Done 36 th word\n", + "Done 41 th word\n", + "Done 46 th word\n", + "Done 51 th word\n", + "Done 56 th word\n", + "Done 61 th word\n", + "Done 66 th word\n", + "Done 71 th word\n", + "Done 76 th word\n", + "Done 81 th word\n", + "Done 86 th word\n", + "Done 91 th word\n", + "Done 96 th word\n", + "Original Text:\n", + "Artificial intelligence (AI) has seen remarkable advancements in recent years, transforming numerous industries. From healthcare to finance, AI technologies are being leveraged to improve efficiency and decision-making. In healthcare, AI algorithms are being used to analyze medical images, predict patient outcomes, and assist in surgery. Finance professionals are using AI for fraud detection, risk management, and algorithmic trading. Despite these advancements, AI also raises ethical concerns, particularly regarding bias and privacy. Ensuring that AI systems are transparent and fair is critical for their continued adoption and trust. As AI continues to evolve, it is essential to consider both its potential benefits and challenges.\n", + "Watermark Areas:\n", + "Artificial intelligence (AI) has [MASK] remarkable advancements in recent [MASK] transforming numerous industries. From [MASK] to finance, AI technologies [MASK] being leveraged to improve [MASK] and decision-making. In healthcare, [MASK] algorithms are being used [MASK] analyze medical images, predict [MASK] outcomes, and assist in [MASK] Finance professionals are using [MASK] for fraud detection, risk [MASK] and algorithmic trading. Despite [MASK] advancements, AI also raises [MASK] concerns, particularly regarding bias [MASK] privacy. Ensuring that AI [MASK] are transparent and fair [MASK] critical for their continued [MASK] and trust. As AI [MASK] to evolve, it is [MASK] to consider both its [MASK] benefits and challenges.\n", + "Watermarked Text:\n", + "Artificial intelligence (AI) has made remarkable advancements in recent years transforming numerous industries. From manufacturing to finance, AI technologies are being leveraged to improve performance and decision-making. In healthcare, ai algorithms are being used to analyze medical images, predict patient outcomes, and assist in how Finance professionals are using them for fraud detection, risk management and algorithmic trading. Despite these advancements, AI also raises ethical concerns, particularly regarding bias and privacy. Ensuring that AI algorithms are transparent and fair is critical for their continued integrity and trust. 
As AI continues to evolve, it is important to consider both its potential benefits and challenges.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5238095238095238\n", + "Average of Other Ratios: 0.4339285714285714\n", + "T-Statistic: -16.777777777777764\n", + "P-Value: 0.0004610425326555902\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5789473684210527\n", + "Average of Other Ratios: 0.4375\n", + "T-Statistic: -11.315789473684214\n", + "P-Value: 0.0014802633181549077\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 2\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Done 1 th word\n", + "Done 6 th word\n", + "Done 11 th word\n", + "Done 16 th word\n", + "Done 21 th word\n", + "Done 26 th word\n", + "Done 31 th word\n", + "Done 36 th word\n", + "Done 41 th word\n", + "Done 46 th word\n", + "Done 51 th word\n", + "Done 56 th word\n", + "Done 61 th word\n", + "Done 66 th word\n", + "Done 71 th word\n", + "Done 76 th word\n", + "Done 81 th word\n", + "Done 86 th word\n", + "Done 91 th word\n", + "Original Text:\n", + "Climate change is one of the most pressing issues facing our planet today. Rising global temperatures, melting ice caps, and increasing frequency of extreme weather events are all indicators of this phenomenon. Scientists warn that without significant action to reduce greenhouse gas emissions, the effects of climate change will become more severe. Renewable energy sources such as solar, wind, and hydro power are being promoted as sustainable alternatives to fossil fuels. Additionally, individuals can make a difference by reducing their carbon footprint through actions like using public transportation, conserving energy, and supporting policies aimed at environmental protection.\n", + "Watermark Areas:\n", + "Climate change is one [MASK] the most pressing issues [MASK] our planet today. Rising [MASK] temperatures, melting ice caps, [MASK] increasing frequency of extreme [MASK] events are all indicators [MASK] this phenomenon. Scientists warn [MASK] without significant action to [MASK] greenhouse gas emissions, the [MASK] of climate change will [MASK] more severe. Renewable energy [MASK] such as solar, wind, [MASK] hydro power are being [MASK] as sustainable alternatives to [MASK] fuels. Additionally, individuals can [MASK] a difference by reducing [MASK] carbon footprint through actions [MASK] using public transportation, conserving [MASK] and supporting policies aimed [MASK] environmental protection.\n", + "Watermarked Text:\n", + "Climate change is one of the most pressing issues facing our planet today. Rising global temperatures, melting ice caps, and increasing frequency of extreme weather events are all indicators of this phenomenon. Scientists warn people without significant action to reduce greenhouse gas emissions, the consequences of climate change will be more severe. Renewable energy sources such as solar, wind, and hydro power are being considered as sustainable alternatives to fossil fuels. Additionally, individuals can make a difference by reducing their carbon footprint through actions like using public transportation, conserving water and supporting policies aimed at environmental protection.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.4875\n", + "T-Statistic: -6.825454528992923\n", + "P-Value: 0.006434193533715181\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7368421052631579\n", + "Average of Other Ratios: 0.4992690058479532\n", + "T-Statistic: -4.1686084498315\n", + "P-Value: 0.02512588315462554\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 2\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 3\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Done 1 th word\n", + "Done 6 th word\n", + "Done 11 th word\n", + "Done 16 th word\n", + "Done 21 th word\n", + "Done 26 th word\n", + "Done 31 th word\n", + "Done 36 th word\n", + "Done 41 th word\n", + "Done 46 th word\n", + "Done 51 th word\n", + "Done 56 th word\n", + "Done 61 th word\n", + "Done 66 th word\n", + "Done 71 th word\n", + "Done 76 th word\n", + "Done 81 th word\n", + "Done 86 th word\n", + "Done 91 th word\n", + "Original Text:\n", + "The field of biotechnology is revolutionizing medicine and agriculture. Advances in genetic engineering have enabled scientists to develop crops that are resistant to pests and diseases, as well as produce higher yields. In medicine, biotechnology is being used to create personalized treatments based on an individual's genetic makeup. This approach, known as precision medicine, aims to provide more effective and targeted therapies for various diseases. However, the rapid pace of biotechnological innovation also raises ethical and regulatory questions. It is crucial to balance the benefits of these technologies with the potential risks and ensure that they are used responsibly.\n", + "Watermark Areas:\n", + "The field of biotechnology [MASK] revolutionizing medicine and agriculture. [MASK] in genetic engineering have [MASK] scientists to develop crops [MASK] are resistant to pests [MASK] diseases, as well as [MASK] higher yields. In medicine, [MASK] is being used to [MASK] personalized treatments based on [MASK] individual's genetic makeup. This [MASK] known as precision medicine, [MASK] to provide more effective [MASK] targeted therapies for various [MASK] However, the rapid pace [MASK] biotechnological innovation also raises [MASK] and regulatory questions. It [MASK] crucial to balance the [MASK] of these technologies with [MASK] potential risks and ensure [MASK] they are used responsibly.\n", + "Watermarked Text:\n", + "The field of biotechnology is revolutionizing medicine and agriculture. advances in genetic engineering have allowed scientists to develop crops that are resistant to pests and diseases, as well as achieve higher yields. In medicine, biotechnology is being used to develop personalized treatments based on an individual's genetic makeup. This is known as precision medicine, designed to provide more effective and targeted therapies for various diseases However, the rapid pace of biotechnological innovation also raises ethical and regulatory questions. It is crucial to balance the benefits of these technologies with the potential risks and ensure that they are used responsibly.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "C:\\Users\\rrath\\.conda\\envs\\py310\\lib\\site-packages\\scipy\\stats\\_axis_nan_policy.py:523: RuntimeWarning: Precision loss occurred in moment calculation due to catastrophic cancellation. This occurs when the data are nearly identical. Results may be unreliable.\n", + " res = hypotest_fun_out(*samples, **kwds)\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.5\n", + "T-Statistic: -inf\n", + "P-Value: 0.0\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.631578947368421\n", + "Average of Other Ratios: 0.47368421052631576\n", + "T-Statistic: -2.5980762113533156\n", + "P-Value: 0.12168993434632014\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 3\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 4\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Done 1 th word\n", + "Done 6 th word\n", + "Done 11 th word\n", + "Done 16 th word\n", + "Done 21 th word\n", + "Done 26 th word\n", + "Done 31 th word\n", + "Done 36 th word\n", + "Done 41 th word\n", + "Done 46 th word\n", + "Done 51 th word\n", + "Done 56 th word\n", + "Done 61 th word\n", + "Done 66 th word\n", + "Done 71 th word\n", + "Done 76 th word\n", + "Done 81 th word\n", + "Done 86 th word\n", + "Done 91 th word\n", + "Original Text:\n", + "Quantum computing is poised to revolutionize the world of computing. Unlike classical computers, which use bits to represent data as 0s and 1s, quantum computers use qubits, which can exist in multiple states simultaneously. This allows quantum computers to perform complex calculations much faster than their classical counterparts. Potential applications of quantum computing include cryptography, drug discovery, and optimization problems. However, building a practical and scalable quantum computer remains a significant challenge. Researchers are exploring various approaches, such as superconducting qubits and trapped ions, to overcome these hurdles and bring quantum computing closer to reality.\n", + "Watermark Areas:\n", + "Quantum computing is poised [MASK] revolutionize the world of [MASK] Unlike classical computers, which [MASK] bits to represent data [MASK] 0s and 1s, quantum [MASK] use qubits, which can [MASK] in multiple states simultaneously. [MASK] allows quantum computers to [MASK] complex calculations much faster [MASK] their classical counterparts. Potential [MASK] of quantum computing include [MASK] drug discovery, and optimization [MASK] However, building a practical [MASK] scalable quantum computer remains [MASK] significant challenge. Researchers are [MASK] various approaches, such as [MASK] qubits and trapped ions, [MASK] overcome these hurdles and [MASK] quantum computing closer to [MASK]\n", + "Watermarked Text:\n", + "Quantum computing is poised to revolutionize the world of computing Unlike classical computers, which use bits to represent data between 0s and 1s, quantum computers use qubits, which can exist in multiple states simultaneously. this allows quantum computers to perform complex calculations much faster than their classical counterparts. Potential applications of quantum computing include : drug discovery, and optimization . However, building a practical and scalable quantum computer remains a significant challenge. Researchers are exploring various approaches, such as trapped qubits and trapped ions, to overcome these hurdles and bring quantum computing closer to .\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5789473684210527\n", + "Average of Other Ratios: 0.46578947368421053\n", + "T-Statistic: -14.333333333333357\n", + "P-Value: 0.004832243042167172\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6666666666666666\n", + "Average of Other Ratios: 0.5051169590643274\n", + "T-Statistic: -3.25528426992502\n", + "P-Value: 0.047299956469803\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 4\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 5\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Done 1 th word\n", + "Done 6 th word\n", + "Done 11 th word\n", + "Done 16 th word\n", + "Done 21 th word\n", + "Done 26 th word\n", + "Done 31 th word\n", + "Done 36 th word\n", + "Done 41 th word\n", + "Done 46 th word\n", + "Done 51 th word\n", + "Done 56 th word\n", + "Done 61 th word\n", + "Done 66 th word\n", + "Done 71 th word\n", + "Done 76 th word\n", + "Done 81 th word\n", + "Done 86 th word\n", + "Done 91 th word\n", + "Done 96 th word\n", + "Done 101 th word\n", + "Done 106 th word\n", + "Original Text:\n", + "The internet of things (IoT) is transforming the way we interact with the world around us. IoT refers to the network of interconnected devices that collect and exchange data. These devices range from smart home appliances to industrial sensors, and their applications are vast. In the home, IoT devices can automate tasks like adjusting the thermostat, turning off lights, and monitoring security systems. In industry, IoT is used to optimize supply chains, monitor equipment health, and improve safety. However, the proliferation of IoT devices also raises concerns about security and privacy. Ensuring that these devices are secure and that data is protected is essential for the continued growth of IoT.\n", + "Watermark Areas:\n", + "The internet of things [MASK] is transforming the way [MASK] interact with the world [MASK] us. IoT refers to [MASK] network of interconnected devices [MASK] collect and exchange data. [MASK] devices range from smart [MASK] appliances to industrial sensors, [MASK] their applications are vast. [MASK] the home, IoT devices [MASK] automate tasks like adjusting [MASK] thermostat, turning off lights, [MASK] monitoring security systems. In [MASK] IoT is used to [MASK] supply chains, monitor equipment [MASK] and improve safety. However, [MASK] proliferation of IoT devices [MASK] raises concerns about security [MASK] privacy. Ensuring that these [MASK] are secure and that [MASK] is protected is essential [MASK] the continued growth of [MASK]\n", + "Watermarked Text:\n", + "The internet of things , is transforming the way we interact with the world around us. IoT refers to a network of interconnected devices that collect and exchange data. these devices range from smart home appliances to industrial sensors, and their applications are vast. in the home, IoT devices can automate tasks like adjusting the thermostat, turning off lights, and monitoring security systems. In business IoT is used to monitor supply chains, monitor equipment , and improve safety. However, the proliferation of IoT devices also raises concerns about security and privacy. 
Ensuring that these devices are secure and that everyone is protected is essential to the continued growth of .\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6956521739130435\n", + "Average of Other Ratios: 0.4204545454545455\n", + "T-Statistic: -4.604144446201748\n", + "P-Value: 0.019265544435432887\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5454545454545454\n", + "Average of Other Ratios: 0.44047619047619047\n", + "T-Statistic: -1.9884454013529376\n", + "P-Value: 0.1408964116764383\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 5\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 6\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Done 1 th word\n", + "Done 6 th word\n", + "Done 11 th word\n", + "Done 16 th word\n", + "Done 21 th word\n", + "Done 26 th word\n", + "Done 31 th word\n", + "Done 36 th word\n", + "Done 41 th word\n", + "Done 46 th word\n", + "Done 51 th word\n", + "Done 56 th word\n", + "Done 61 th word\n", + "Done 66 th word\n", + "Done 71 th word\n", + "Done 76 th word\n", + "Done 81 th word\n", + "Done 86 th word\n", + "Done 91 th word\n", + "Done 96 th word\n", + "Done 101 th word\n", + "Done 106 th word\n", + "Original Text:\n", + "Renewable energy is gaining momentum as a viable solution to the world's energy needs. Solar, wind, and hydro power are among the most common forms of renewable energy, and they offer a sustainable alternative to fossil fuels. Solar power harnesses energy from the sun using photovoltaic cells, while wind power generates electricity through turbines. Hydropower uses the energy of flowing water to produce electricity. These technologies are being adopted at an increasing rate as countries seek to reduce their carbon emissions and transition to cleaner energy sources. The growth of renewable energy is not without challenges, including the need for improved energy storage solutions and the integration of these technologies into existing power grids.\n", + "Watermark Areas:\n", + "Renewable energy is gaining [MASK] as a viable solution [MASK] the world's energy needs. [MASK] wind, and hydro power [MASK] among the most common [MASK] of renewable energy, and [MASK] offer a sustainable alternative [MASK] fossil fuels. Solar power [MASK] energy from the sun [MASK] photovoltaic cells, while wind [MASK] generates electricity through turbines. [MASK] uses the energy of [MASK] water to produce electricity. [MASK] technologies are being adopted [MASK] an increasing rate as [MASK] seek to reduce their [MASK] emissions and transition to [MASK] energy sources. The growth [MASK] renewable energy is not [MASK] challenges, including the need [MASK] improved energy storage solutions [MASK] the integration of these [MASK] into existing power grids.\n", + "Watermarked Text:\n", + "Renewable energy is gaining recognition as a viable solution to the world's energy needs. solar wind, and hydro power are among the most common forms of renewable energy, and they offer a sustainable alternative to fossil fuels. Solar power generates energy from the sun through photovoltaic cells, while wind power generates electricity through turbines. hydro uses the energy of drinking water to produce electricity. new technologies are being adopted at an increasing rate as countries seek to reduce their carbon emissions and transition to renewable energy sources. 
The growth of renewable energy is not without challenges, including the need for improved energy storage solutions and the integration of these technologies into existing power grids.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.782608695652174\n", + "Average of Other Ratios: 0.4891304347826087\n", + "T-Statistic: -5.7132994884546475\n", + "P-Value: 0.010638289400887542\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7272727272727273\n", + "Average of Other Ratios: 0.5454545454545454\n", + "T-Statistic: -2.529822128134705\n", + "P-Value: 0.08543743614799877\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 6\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 7\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Done 1 th word\n", + "Done 6 th word\n", + "Done 11 th word\n", + "Done 16 th word\n", + "Done 21 th word\n", + "Done 26 th word\n", + "Done 31 th word\n", + "Done 36 th word\n", + "Done 41 th word\n", + "Done 46 th word\n", + "Done 51 th word\n", + "Done 56 th word\n", + "Done 61 th word\n", + "Done 66 th word\n", + "Done 71 th word\n", + "Done 76 th word\n", + "Done 81 th word\n", + "Done 86 th word\n", + "Done 91 th word\n", + "Done 96 th word\n", + "Done 101 th word\n", + "Done 106 th word\n", + "Original Text:\n", + "The rise of e-commerce has transformed the retail industry. Online shopping has become increasingly popular, offering consumers convenience and a wide range of products at their fingertips. Major e-commerce platforms like Amazon, Alibaba, and eBay have disrupted traditional brick-and-mortar stores, leading to significant changes in consumer behavior. The COVID-19 pandemic further accelerated the shift to online shopping, as lockdowns and social distancing measures limited in-person shopping. While e-commerce offers many benefits, it also presents challenges, such as the need for efficient logistics and concerns about data privacy. As the industry continues to evolve, companies are exploring new technologies like augmented reality and artificial intelligence to enhance the online shopping experience.\n", + "Watermark Areas:\n", + "The rise of e-commerce [MASK] transformed the retail industry. [MASK] shopping has become increasingly [MASK] offering consumers convenience and [MASK] wide range of products [MASK] their fingertips. Major e-commerce [MASK] like Amazon, Alibaba, and [MASK] have disrupted traditional brick-and-mortar [MASK] leading to significant changes [MASK] consumer behavior. The COVID-19 [MASK] further accelerated the shift [MASK] online shopping, as lockdowns [MASK] social distancing measures limited [MASK] shopping. While e-commerce offers [MASK] benefits, it also presents [MASK] such as the need [MASK] efficient logistics and concerns [MASK] data privacy. As the [MASK] continues to evolve, companies [MASK] exploring new technologies like [MASK] reality and artificial intelligence [MASK] enhance the online shopping [MASK]\n", + "Watermarked Text:\n", + "The rise of e-commerce has transformed the retail industry. online shopping has become increasingly popular offering consumers convenience and a wide range of products at their fingertips. Major e-commerce companies like Amazon, Alibaba, and others have disrupted traditional brick-and-mortar shopping leading to significant changes in consumer behavior. The COVID-19 has further accelerated the shift towards online shopping, as lockdowns and social distancing measures limited online shopping. While e-commerce offers many benefits, it also presents challenges such as the need for efficient logistics and concerns about data privacy. 
As the internet continues to evolve, companies are exploring new technologies like augmented reality and artificial intelligence to enhance the online shopping .\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5454545454545454\n", + "Average of Other Ratios: 0.4599802371541502\n", + "T-Statistic: -2.439848527409759\n", + "P-Value: 0.0925127409364643\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "ename": "KeyboardInterrupt", + "evalue": "", + "output_type": "error", + "traceback": [ + "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[1;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", + "Cell \u001b[1;32mIn[14], line 24\u001b[0m\n\u001b[0;32m 21\u001b[0m \u001b[38;5;66;03m# print(match_ratios)\u001b[39;00m\n\u001b[0;32m 22\u001b[0m list_of_significance_watermarked\u001b[38;5;241m.\u001b[39mappend(check_significant_difference(match_ratios))\n\u001b[1;32m---> 24\u001b[0m match_ratios \u001b[38;5;241m=\u001b[39m \u001b[43mwatermark_text_and_calculate_matches\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtext\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmax_offset\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m5\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[0;32m 25\u001b[0m list_of_significance\u001b[38;5;241m.\u001b[39mappend(check_significant_difference(match_ratios))\n\u001b[0;32m 27\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m___________________________________________________________________________________________________________________________\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n", + "Cell \u001b[1;32mIn[6], line 32\u001b[0m, in \u001b[0;36mwatermark_text_and_calculate_matches\u001b[1;34m(text, model_name, max_offset)\u001b[0m\n\u001b[0;32m 30\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m 
\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m[MASK]\u001b[39m\u001b[38;5;124m'\u001b[39m \u001b[38;5;129;01min\u001b[39;00m chunk:\n\u001b[0;32m 31\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m---> 32\u001b[0m tempd \u001b[38;5;241m=\u001b[39m \u001b[43mclassifier\u001b[49m\u001b[43m(\u001b[49m\u001b[43mchunk\u001b[49m\u001b[43m)\u001b[49m\n", + "\u001b[1;31mKeyboardInterrupt\u001b[0m: " + ] + } + ], + "source": [ + "list_of_significance = []\n", + "list_of_significance_watermarked = []\n", + "count_t = 0\n", + "for text in texts:\n", + 
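"    # For each sample text: embed the watermark (offset 0), randomly insert a few words to simulate tampering,\n", + "    # then compute match ratios for the tampered text and, as a control, for the original un-watermarked text.\n", + "    # check_significant_difference appears to return [highest_ratio, average_others, t_statistic, p_value],\n", + "    # which is the layout of the side-by-side table printed below.\n", + 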
" count_t+=1\n", + " print(\"___________________________________________________________________________________________________________________________\")\n", + " print(\"Doing\", count_t)\n", + " print(\"___________________________________________________________________________________________________________________________\")\n", + "\n", + " words_to_add = [\"example\", \"test\", \"random\", \"insert\"]\n", + " num_words_to_add = 5\n", + "\n", + " # modified_text = randomly_add_words(text, words_to_add, num_words_to_add)\n", + " modified_text = randomly_add_words(watermark_text(text, offset=0), words_to_add, num_words_to_add)\n", + " # print(\"Original Text:\")\n", + " # print(text)\n", + " # print(\"\\nModified Text:\")\n", + " # print(modified_text)\n", + "\n", + " match_ratios = watermark_text_and_calculate_matches(modified_text, max_offset=5)\n", + " # print(match_ratios)\n", + " list_of_significance_watermarked.append(check_significant_difference(match_ratios))\n", + "\n", + " match_ratios = watermark_text_and_calculate_matches(text, max_offset=5)\n", + " list_of_significance.append(check_significant_difference(match_ratios))\n", + "\n", + " print(\"___________________________________________________________________________________________________________________________\")\n", + " print(\"Done\", count_t, )\n", + " print(\"___________________________________________________________________________________________________________________________\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": 47, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "TQd_XP_kRZ3r", + "outputId": "4f585fea-470c-4f09-8397-182c269af1ee" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "[[0.5789473684210527, 0.4375, -11.315789473684214, 0.0014802633181549077],\n", + " [0.7368421052631579,\n", + " 0.4992690058479532,\n", + " -4.1686084498315,\n", + " 0.02512588315462554],\n", + " [0.631578947368421,\n", + " 0.47368421052631576,\n", + " -2.5980762113533156,\n", + " 0.12168993434632014],\n", + " [0.6666666666666666,\n", + " 0.5051169590643274,\n", + " -3.25528426992502,\n", + " 0.047299956469803],\n", + " [0.5454545454545454,\n", + " 0.44047619047619047,\n", + " -1.9884454013529376,\n", + " 0.1408964116764383],\n", + " [0.7272727272727273,\n", + " 0.5454545454545454,\n", + " -2.529822128134705,\n", + " 0.08543743614799877],\n", + " [0.5, 0.4285714285714286, -3.674234614174766, 0.034896984510150934],\n", + " [0.45, 0.36140350877192984, -1.789925042646048, 0.21535497619213528],\n", + " [0.6363636363636364,\n", + " 0.5524891774891776,\n", + " -4.925394256602069,\n", + " 0.01603915968463389],\n", + " [0.6363636363636364,\n", + " 0.5004940711462451,\n", + " -2.8968775241076448,\n", + " 0.0626611732957653],\n", + " [0.75, 0.5776315789473684, -4.168368422873468, 0.02512970789136552],\n", + " [0.6818181818181818,\n", + " 0.5568181818181819,\n", + " -3.666666666666662,\n", + " 0.03508151471548204],\n", + " [0.7391304347826086,\n", + " 0.42984189723320154,\n", + " -7.289590560310877,\n", + " 0.005329596912408047],\n", + " [0.782608695652174,\n", + " 0.4936594202898551,\n", + " -7.972508980104777,\n", + " 0.004117361652430399],\n", + " [0.6363636363636364,\n", + " 0.5113636363636364,\n", + " -3.22047024073016,\n", + " 0.04856685655980099],\n", + " [0.6521739130434783,\n", + " 0.5434782608695652,\n", + " -2.3797114365109158,\n", + " 0.09764327274027122],\n", + " [0.631578947368421,\n", + " 0.47368421052631576,\n", + " -2.5980762113533156,\n", + " 
0.12168993434632014],\n", + " [0.7142857142857143,\n", + " 0.5367965367965368,\n", + " -2.3442928638434024,\n", + " 0.10082728660926546],\n", + " [0.47619047619047616,\n", + " 0.38095238095238093,\n", + " -3.4641016151377544,\n", + " 0.07417990022744853],\n", + " [0.5, 0.4285714285714286, -3.674234614174766, 0.034896984510150934],\n", + " [0.5454545454545454,\n", + " 0.4631093544137022,\n", + " -4.2649449620933755,\n", + " 0.05082148124684452],\n", + " [0.782608695652174,\n", + " 0.4936594202898551,\n", + " -7.972508980104777,\n", + " 0.004117361652430399],\n", + " [0.6363636363636364,\n", + " 0.5113636363636364,\n", + " -3.22047024073016,\n", + " 0.04856685655980099],\n", + " [0.6521739130434783,\n", + " 0.5434782608695652,\n", + " -2.3797114365109158,\n", + " 0.09764327274027122],\n", + " [0.631578947368421,\n", + " 0.47368421052631576,\n", + " -2.5980762113533156,\n", + " 0.12168993434632014],\n", + " [0.7142857142857143,\n", + " 0.5367965367965368,\n", + " -2.3442928638434024,\n", + " 0.10082728660926546],\n", + " [0.47619047619047616,\n", + " 0.38095238095238093,\n", + " -3.4641016151377544,\n", + " 0.07417990022744853],\n", + " [0.5, 0.4285714285714286, -3.674234614174766, 0.034896984510150934],\n", + " [0.5454545454545454,\n", + " 0.4631093544137022,\n", + " -4.2649449620933755,\n", + " 0.05082148124684452]]" + ] + }, + "execution_count": 47, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 49, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "gm9oCHWlSi4I", + "outputId": "c192b477-1e37-4336-f126-2f77e44eb6f6" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Ratio Average Others T-Statistic P-Value || Highest Ratio Average Others T-Statistic P-Value \n", + "0.5789473684210527 0.4375 -11.315789473684214 0.0014802633181549077 || 0.65 0.41666666666666663 -5.17705132919467 0.013988180239752648\n", + "0.7368421052631579 0.4992690058479532 -4.1686084498315 0.02512588315462554 || 0.631578947368421 0.5072368421052631 -3.7039840906304633 0.034183520845761046\n", + "0.631578947368421 0.47368421052631576 -2.5980762113533156 0.12168993434632014 || 0.8 0.44999999999999996 -9.899494936611665 0.002192318898657741\n", + "0.6666666666666666 0.5051169590643274 -3.25528426992502 0.047299956469803 || 0.631578947368421 0.5052631578947369 -3.3070695276573017 0.04549183755402306 \n", + "0.5454545454545454 0.44047619047619047 -1.9884454013529376 0.1408964116764383 || 0.6363636363636364 0.3932806324110672 -5.671556095740365 0.010858631561421467\n", + "0.7272727272727273 0.5454545454545454 -2.529822128134705 0.08543743614799877 || 0.782608695652174 0.5 -3.0929011843007626 0.0535919356301439 \n", + "0.5 0.4285714285714286 -3.674234614174766 0.034896984510150934 || 0.5454545454545454 0.39377470355731226 -3.7778595133554176 0.032490871457917674\n", + "0.45 0.36140350877192984 -1.789925042646048 0.21535497619213528 || 0.5714285714285714 0.407936507936508 -2.525754294555235 0.12746322930311096 \n", + "0.6363636363636364 0.5524891774891776 -4.925394256602069 0.01603915968463389 || 0.7272727272727273 0.5093873517786561 -4.676780667650381 0.018467037431746196\n", + "0.6363636363636364 0.5004940711462451 -2.8968775241076448 0.0626611732957653 || 0.7083333333333334 0.5040760869565217 -3.3948179538648735 0.042623438183825496\n", + "0.75 0.5776315789473684 -4.168368422873468 0.02512970789136552 || 0.6190476190476191 0.5369047619047619 -3.7736294416002862 
0.03258485403885965 \n", + "0.6818181818181818 0.5568181818181819 -3.666666666666662 0.03508151471548204 || 0.6956521739130435 0.532608695652174 -3.382407126012729 0.043014906734981546\n", + "0.7391304347826086 0.42984189723320154 -7.289590560310877 0.005329596912408047 || 0.7083333333333334 0.4433876811594203 -4.490136077665652 0.02061159932091642 \n", + "0.782608695652174 0.4936594202898551 -7.972508980104777 0.004117361652430399 || 0.6666666666666666 0.5354166666666667 -5.5468407098514305 0.011553575011403559\n", + "0.6363636363636364 0.5113636363636364 -3.22047024073016 0.04856685655980099 || 0.6086956521739131 0.47826086956521735 -4.242640687119289 0.023981199790656615\n", + "0.6521739130434783 0.5434782608695652 -2.3797114365109158 0.09764327274027122 || 0.8333333333333334 0.46557971014492755 -4.234837745291732 0.02409863068609194 \n", + "0.631578947368421 0.47368421052631576 -2.5980762113533156 0.12168993434632014 || 0.6 0.48333333333333334 -3.4999999999999987 0.07282735005446936 \n", + "0.7142857142857143 0.5367965367965368 -2.3442928638434024 0.10082728660926546 || 0.782608695652174 0.549901185770751 -4.594812178568088 0.01937136230021868 \n", + "0.47619047619047616 0.38095238095238093 -3.4641016151377544 0.07417990022744853 || 0.6363636363636364 0.4090909090909091 -4.08248290463863 0.026547885467199484\n", + "0.5 0.4285714285714286 -3.674234614174766 0.034896984510150934 || 0.7272727272727273 0.42539525691699603 -4.9868551538544414 0.015503886330756058\n", + "0.5454545454545454 0.4631093544137022 -4.2649449620933755 0.05082148124684452 || 0.6956521739130435 0.44157608695652173 -7.251548965980652 0.0054102533801680865\n", + "0.782608695652174 0.4936594202898551 -7.972508980104777 0.004117361652430399 || 0.625 0.49222222222222217 -3.9180327868852447 0.05939767081769266 \n", + "0.6363636363636364 0.5113636363636364 -3.22047024073016 0.04856685655980099 || 0.5217391304347826 0.44565217391304346 -3.6556307750696546 0.03535284700251738 \n", + "0.6521739130434783 0.5434782608695652 -2.3797114365109158 0.09764327274027122 || 0.6666666666666666 0.5 -13.279056191361398 0.005623287315631082\n", + "0.631578947368421 0.47368421052631576 -2.5980762113533156 0.12168993434632014 || 0.7 0.475 -8.999999999999995 0.0028958121618641495\n", + "0.7142857142857143 0.5367965367965368 -2.3442928638434024 0.10082728660926546 || 0.7391304347826086 0.5602766798418972 -3.356266857779692 0.04385449037496923 \n", + "0.47619047619047616 0.38095238095238093 -3.4641016151377544 0.07417990022744853 || 0.5 0.38636363636363635 -2.611164839335468 0.07960498081790623 \n", + "0.5 0.4285714285714286 -3.674234614174766 0.034896984510150934 || 0.6086956521739131 0.44318181818181823 -4.855072463768116 0.01668150816820796 \n", + "0.5454545454545454 0.4631093544137022 -4.2649449620933755 0.05082148124684452 || 0.6666666666666666 0.4673913043478261 -2.77438299767925 0.0693145043773778 \n" + ] + } + ], + "source": [ + "print(f\"{'Highest Ratio':<20} {'Average Others':<20} {'T-Statistic':<20} {'P-Value':<20} || {'Highest Ratio':<20} {'Average Others':<20} {'T-Statistic':<20} {'P-Value':<20}\")\n", + "\n", + "# Print each pair of lists side by side\n", + "for sig, wm_sig in zip(list_of_significance, list_of_significance_watermarked):\n", + " print(f\"{sig[0]:<20} {sig[1]:<20} {sig[2]:<20} {sig[3]:<20} || {wm_sig[0]:<20} {wm_sig[1]:<20} {wm_sig[2]:<20} {wm_sig[3]:<20}\")" + ] + }, + { + "cell_type": "code", + "execution_count": 46, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 1000 + }, + "id": 
"brgJ75fzTzry", + "outputId": "59e7ac15-cb87-4abc-b421-144e302e6b1b" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "First few rows of the DataFrame:\n", + " Highest Ratio Average Others T-Statistic P-Value Label\n", + "0 0.233333 0.182203 -3.532758 0.038563 Original\n", + "1 0.203390 0.139195 -3.440591 0.041218 Original\n", + "2 0.338983 0.270339 -2.228608 0.112142 Original\n", + "3 0.254237 0.168362 -2.451613 0.246559 Original\n", + "4 0.288136 0.210876 -5.467540 0.012026 Original\n", + "\n", + "Statistical Summary:\n", + " Highest Ratio Average Others T-Statistic P-Value\n", + "count 4000.000000 4000.000000 3999.000000 3999.000000\n", + "mean 0.490285 0.339968 -6.076672 0.036783\n", + "std 0.128376 0.082900 5.580957 0.043217\n", + "min 0.101695 0.066667 -111.524590 0.000002\n", + "25% 0.416667 0.296610 -6.938964 0.006418\n", + "50% 0.491525 0.354732 -4.431515 0.021973\n", + "75% 0.573770 0.398224 -3.176861 0.052069\n", + "max 0.868852 0.580601 -1.166065 0.451288\n", + "\n", + "Missing Values:\n", + "Highest Ratio 0\n", + "Average Others 0\n", + "T-Statistic 1\n", + "P-Value 1\n", + "Label 0\n", + "dtype: int64\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA/AAAAK9CAYAAACD7ocIAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8g+/7EAAAACXBIWXMAAA9hAAAPYQGoP6dpAADYaklEQVR4nOzdd3hUZd7G8XvSeyeVQELovUiJIBBA6TZ8FQuoy6qrgKKua++Fde0F2+qKjbWtoiK9KlV6b8HQSUKAJKSXOe8fQ0YiNWGSM5N8P9c1V2bOnHnOb0I0uedpFsMwDAEAAAAAAKfmZnYBAAAAAADg3AjwAAAAAAC4AAI8AAAAAAAugAAPAAAAAIALIMADAAAAAOACCPAAAAAAALgAAjwAAAAAAC6AAA8AAAAAgAsgwAMAAAAA4AII8AAAp/fUU0/JYrHUyrX69u2rvn372h8vXLhQFotF3377ba1c/5ZbblFCQkKtXKu68vLy9Ne//lXR0dGyWCyaMGGCw9qePHmyLBaLdu/eXe3Xrlq1ymH1uLrdu3fLYrFo8uTJZpcCAHAAAjwAoFZVhKyKm4+Pj2JjYzVw4EC9+eabOn78uEOuc/DgQT311FNat26dQ9pzJGeu7Xy88MILmjx5su6880599tlnGjVq1BnPTUhI0LBhw077XG1/OFJT3nnnnSoF5JN//i0Wi4KCgtSnTx/9/PPP1a5hypQpev3116v9egCAa/AwuwAAQP30zDPPKDExUaWlpUpPT9fChQs1YcIEvfrqq/rxxx/Vvn17+7mPPfaYHnrooSq1f/DgQT399NNKSEhQx44dz/t1s2fPrtJ1quNstf373/+W1Wqt8RouxPz589WjRw89+eSTDm971KhRGjlypLy9vR3edk155513FBERoVtuueW8X3PppZdq9OjRMgxDe/bs0bvvvqvhw4drxowZGjhwYJVrmDJlijZt2nTKaIjGjRursLBQnp6eVW4TAOB8CPAAAFMMHjxYF110kf3xww8/rPnz52vYsGG6/PLLtXXrVvn6+kqSPDw85OFRs7+yCgoK5OfnJy8vrxq9zrm4QtDKzMxU69ata6Rtd3d3ubu710jbzqR58+a66aab7I9HjBih1q1b64033qhWgD+TilEuAIC6gSH0AACn0a9fPz3++OPas2ePPv/8c/vx082BnzNnjnr16qWQkBAFBASoRYsWeuSRRyTZhmZ37dpVknTrrbfahypXDHPu27ev2rZtq9WrV6t3797y8/Ozv/bPc+ArlJeX65FHHlF0dLT8/f11+eWXa9++fZXOSUhIOG0v7Mltnqu2082Bz8/P1/3336/4+Hh5e3urRYsWevnll2UYRqXzLBaLxo0bp6lTp6pt27by9vZWmzZtNHPmzNN/w/8kMzNTY8aMUVRUlHx8fNShQwd98skn9ucrhrynpaXp559/ttdenfnqZ3K6OfBWq1VPPfWUYmNj5efnp5SUFG3ZsuWM3+/i4mLdd999atCggfz9/XXVVVfp8OHDp5w3Y8YMXXLJJfL391dgYKCGDh2qzZs3VzonPT1dt956qxo2bChvb2/FxMToiiuusNeXkJCgzZs3a9GiRfbvx+l+fs6lVatWioiI0K5duyod/+GHHzR06FDFxsbK29tbSUlJevbZZ1VeXm4/p2/fvvr555+1Z88eew0VP0NnmgM/f/58+3sPCQnRFVdcoa1bt1a5bgBA7aIHHgDgVEaNGqVHHnlEs2fP1m233XbaczZv3qxhw4apffv2euaZZ+Tt7a3U1FQtWbJEki0MPfPMM3riiSd0++2365JLLpEkXXzxxfY2jhw5osGDB2vkyJG66aabFBUVdda6nn/+eVksFj344IPKzMzU66+/rgEDBmjdunX2kQLn43xqO5lhGLr88su1YMECjRkzRh07dtSsWbP0wAMP6MCBA3rttdcqnb948WJ99913uuuuuxQYGKg333xTI0aM0N69exUeHn7GugoLC9W3b1+lpqZq3LhxSkxM1DfffKNbbrlF2dnZuueee9SqVSt99tlnuvfee9WwYUPdf//9kqQGDRqc9T2XlpYqKyvrlOM5OTlnfV2Fhx9+WP/61780fPhwDRw4UOvXr9fAgQNVVFR02vPHjx+v0NBQPfnkk9q9e7def/11jRs3Tl999ZX9nM8++0w333yzBg4cqBdffFEFBQV699131atXL61du9YegEeMGKHNmzdr/PjxSkhIUGZmpubMmaO9e/cqISFBr7/+usaPH6+AgAA9+uijknTOn6XTycnJ0bFjx5SUlFTp+OTJkxUQEKD77rtPAQEBmj9/vp544gnl5ubqpZdekiQ9+uijys
nJ0f79++0/DwEBAWe81ty5czV48GA1adJETz31lAoLC/XWW2+pZ8+eWrNmjdMvoggA9ZoBAEAt+vjjjw1JxsqVK894TnBwsNGpUyf74yeffNI4+VfWa6+9ZkgyDh8+fMY2Vq5caUgyPv7441Oe69OnjyHJeO+99077XJ8+feyPFyxYYEgy4uLijNzcXPvxr7/+2pBkvPHGG/ZjjRs3Nm6++eZztnm22m6++WajcePG9sdTp041JBnPPfdcpfOuueYaw2KxGKmpqfZjkgwvL69Kx9avX29IMt56661TrnWy119/3ZBkfP755/ZjJSUlRnJyshEQEFDpvTdu3NgYOnToWds7+VxJZ71988039vMrfj7S0tIMwzCM9PR0w8PDw7jyyisrtfvUU08Zkip9vyteO2DAAMNqtdqP33vvvYa7u7uRnZ1tGIZhHD9+3AgJCTFuu+22Sm2mp6cbwcHB9uPHjh0zJBkvvfTSWd9jmzZtKv37noskY8yYMcbhw4eNzMxMY9WqVcagQYNOe62CgoJTXn/HHXcYfn5+RlFRkf3Y0KFDK/3cVEhLSzvlZ61jx45GZGSkceTIEfux9evXG25ubsbo0aPP+30AAGofQ+gBAE4nICDgrKvRh4SESLINL67ugm/e3t669dZbz/v80aNHKzAw0P74mmuuUUxMjKZPn16t65+v6dOny93dXXfffXel4/fff78Mw9CMGTMqHR8wYEClXtz27dsrKChIv//++zmvEx0dreuvv95+zNPTU3fffbfy8vK0aNGiar+H7t27a86cOafcXn755XO+dt68eSorK9Ndd91V6fj48ePP+Jrbb7+90pSLSy65ROXl5dqzZ48k2/SL7OxsXX/99crKyrLf3N3d1b17dy1YsECS5OvrKy8vLy1cuFDHjh2rzls/o48++kgNGjRQZGSkLrroIs2bN0//+Mc/dN9991U67+TRHcePH1dWVpYuueQSFRQUaNu2bVW+7qFDh7Ru3TrdcsstCgsLsx9v3769Lr300hr/eQYAXBiG0AMAnE5eXp4iIyPP+Px1112nDz/8UH/961/10EMPqX///rr66qt1zTXXyM3t/D6bjouLq9KCdc2aNav02GKxqGnTpg6d/306e/bsUWxsbKUPDyTbUPyK50/WqFGjU9oIDQ09ZwDds2ePmjVrdsr370zXqYqIiAgNGDDglOPnszBhxXWbNm1a6XhYWJhCQ0NP+5o/fw8qzqv4HuzcuVOSbc2F0wkKCpJk+5DnxRdf1P3336+oqCj16NFDw4YN0+jRoxUdHX3O2s/miiuu0Lhx41RSUqKVK1fqhRdeUEFBwSnf/82bN+uxxx7T/PnzlZubW+m5852CcLKK72eLFi1Oea5Vq1aaNWuW8vPz5e/vX+W2AQA1jwAPAHAq+/fvV05OzimB7WS+vr765ZdftGDBAv3888+aOXOmvvrqK/Xr10+zZ88+r1XMqzJv/Xz9eaG9CuXl5bW2svqZrmP8acG7uuxc34OKURufffbZaYP4yR8sTJgwQcOHD9fUqVM1a9YsPf7445o4caLmz5+vTp06VbvGhg0b2j/UGDJkiCIiIjRu3DilpKTo6quvliRlZ2erT58+CgoK0jPPPKOkpCT5+PhozZo1evDBB51+u0EAgOMxhB4A4FQ+++wzSTrnVlpubm7q37+/Xn31VW3ZskXPP/+85s+fbx/+fKYwXV0VvbYVDMNQampqpQW/QkNDlZ2dfcpr/9x7XZXaGjdurIMHD54ypaBi+HTjxo3Pu61zXWfnzp2nhEJHX6eqKq6bmppa6fiRI0eqPay9YopBZGSkBgwYcMrtz6vIJyUl6f7779fs2bO1adMmlZSU6JVXXrE/74iftTvuuENJSUl67LHH7B80LFy4UEeOHNHkyZN1zz33aNiwYRowYMBpRx6cbw0V38/t27ef8ty2bdsUERFB7zsAODECPADAacyfP1/PPvusEhMTdeONN57xvKNHj55yrGPHjpJsW4hJsoeQ0wXq6vj0008rhehvv/1Whw4d0uDBg+3HkpKStHz5cpWUlNiPTZs27ZTt5qpS25AhQ1ReXq6333670vHXXntNFoul0vUvxJAhQ5Senl5ppfaysjK99dZbCggIUJ8+fRxynarq37+/PDw89O6771Y6/ufvR1UMHDhQQUFBeuGFF1RaWnrK8xVbzhUUFJyy0n1SUpICAwPtP2eS7d/zQn/OPDw8dP/992vr1q364YcfJP0xkuDk0RMlJSV65513Tnm9v7//eQ2pj4mJUceOHfXJJ59UqnnTpk2aPXu2hgwZckHvAwBQsxhCDwAwxYwZM7Rt2zaVlZUpIyND8+fP15w5c9S4cWP9+OOP8vHxOeNrn3nmGf3yyy8aOnSoGjdurMzMTL3zzjtq2LChevXqJckWtEJCQvTee+8pMDBQ/v7+6t69uxITE6tVb1hYmHr16qVbb71VGRkZev3119W0adNKW9399a9/1bfffqtBgwbp2muv1a5du/T555+fsjVYVWobPny4UlJS9Oijj2r37t3q0KGDZs+erR9++EETJkw4pe3quv322/X+++/rlltu0erVq5WQkKBvv/1WS5Ys0euvv37KHPzaEhUVpXvuuUevvPKKLr/8cg0aNEjr16/XjBkzFBERUa3e76CgIL377rsaNWqUOnfurJEjR6pBgwbau3evfv75Z/Xs2VNvv/22duzYof79++vaa69V69at5eHhoe+//14ZGRkaOXKkvb0uXbro3Xff1XPPPaemTZsqMjLyjPPrz+aWW27RE088oRdffFFXXnmlLr74YoWGhurmm2/W3XffLYvFos8+++y00yG6dOmir776Svfdd5+6du2qgIAADR8+/LTXeemllzR48GAlJydrzJgx9m3kgoOD9dRTT1W5bgBALTJxBXwAQD1UsdVXxc3Ly8uIjo42Lr30UuONN96otF1ZhT9vIzdv3jzjiiuuMGJjYw0vLy8jNjbWuP76640dO3ZUet0PP/xgtG7d2vDw8Ki0lVafPn2MNm3anLa+M20j99///td4+OGHjcjISMPX19cYOnSosWfPnlNe/8orrxhxcXGGt7e30bNnT2PVqlWntHm22v68jZxh2LY9u/fee43Y2FjD09PTaNasmfHSSy9V2irNMGzbk40dO/aUms60vd2fZWRkGLfeeqsRERFheHl5Ge3atTvtVndV3UbuTOdWfG/Pto2cYRhGWVmZ8fjjjxvR0dGGr6+v0a9fP2Pr1q1GeHi48be//e2U1/55i8KK6yxYsOCU4wMHDjSCg4MNHx8fIykpybjllluMVatWGYZhGFlZWcbYsWONli1bGv7+/kZwcLDRvXt34+uvv67UTnp6ujF06FAjMDDQkHTOLeXO9O9kGH9sj1dR65IlS4wePXoYvr6+RmxsrPGPf/zDmDVr1invJy8vz7jhhhuMkJAQQ5L9Z+h028gZhmHMnTvX6Nmzp+Hr62sEBQUZw4cPN7Zs2XLWugEA5rMYRj1a1QYAANQJ2dnZCg0N1XPPPadHH33U7HIAAKgVzIEHAABOr
bCw8JRjr7/+uiSdsuAcAAB1GXPgAQCAU/vqq680efJkDRkyRAEBAVq8eLH++9//6rLLLlPPnj3NLg8AgFpDgAcAAE6tffv28vDw0L/+9S/l5ubaF7Z77rnnzC4NAIBaxRx4AAAAAABcAHPgAQAAAABwAQR4AAAAAABcAHPgJVmtVh08eFCBgYGyWCxmlwMAAAAAqOMMw9Dx48cVGxsrN7fz61snwEs6ePCg4uPjzS4DAAAAAFDP7Nu3Tw0bNjyvcwnwkgIDAyXZvnFBQUEmVwMAAAAAqOtyc3MVHx9vz6PngwAv2YfNBwUFEeABAAAAALWmKtO4WcQOAAAAAAAXQIAHAAAAAMAFEOABAAAAAHABzIEHAAAAgHMwDENlZWUqLy83uxS4CHd3d3l4eDh0q3ICPAAAAACcRUlJiQ4dOqSCggKzS4GL8fPzU0xMjLy8vBzSHgEeAAAAAM7AarUqLS1N7u7uio2NlZeXl0N7VFE3GYahkpISHT58WGlpaWrWrJnc3C58BjsBHgAAAADOoKSkRFarVfHx8fLz8zO7HLgQX19feXp6as+ePSopKZGPj88Ft8kidgAAAABwDo7oPUX94+ifG34KAQAAAABwAQR4AAAAAABcAAEeAAAAAFAlkydPVkhIyAW3Y7FYNHXq1Atup74gwAMAAABAPXTLLbfoyiuvNLsMVAEBHgAAAAAAF0CABwAAAABU8uqrr6pdu3by9/dXfHy87rrrLuXl5Z1y3tSpU9WsWTP5+Pho4MCB2rdvX6Xnf/jhB3Xu3Fk+Pj5q0qSJnn76aZWVldXW26hzCPAAAAAAgErc3Nz05ptvavPmzfrkk080f/58/eMf/6h0TkFBgZ5//nl9+umnWrJkibKzszVy5Ej787/++qtGjx6te+65R1u2bNH777+vyZMn6/nnn6/tt1NnEOABAAAAAJVMmDBBKSkpSkhIUL9+/fTcc8/p66+/rnROaWmp3n77bSUnJ6tLly765JNPtHTpUv3222+SpKeffloPPfSQbr75ZjVp0kSXXnqpnn32Wb3//vtmvKU6wcPsAgAAAAAAzmXu3LmaOHGitm3bptzcXJWVlamoqEgFBQXy8/OTJHl4eKhr167217Rs2VIhISHaunWrunXrpvXr12vJkiWVetzLy8tPaQfnjwAPAAAAALDbvXu3hg0bpjvvvFPPP/+8wsLCtHjxYo0ZM0YlJSXnHbzz8vL09NNP6+qrrz7lOR8fH0eXXS8Q4AEAAAAAdqtXr5bVatUrr7wiNzfbrOs/D5+XpLKyMq1atUrdunWTJG3fvl3Z2dlq1aqVJKlz587avn27mjZtWnvF13EEeAAAAACop3JycrRu3bpKxyIiIlRaWqq33npLw4cP15IlS/Tee++d8lpPT0+NHz9eb775pjw8PDRu3Dj16NHDHuifeOIJDRs2TI0aNdI111wjNzc3rV+/Xps2bdJzzz1XG2+vzmEROwAAAACopxYuXKhOnTpVun322Wd69dVX9eKLL6pt27b64osvNHHixFNe6+fnpwcffFA33HCDevbsqYCAAH311Vf25wcOHKhp06Zp9uzZ6tq1q3r06KHXXntNjRs3rs23WKdYDMMwzC7CbLm5uQoODlZOTo6CgoLMLgcAAACAkygqKlJaWpoSExOZt40qO9vPT3VyKD3wAAAAAAC4AAI8AAAAAAAugAAPAAAAAIALYBV6AABcWFpWvvKLyxzerr+3hxIj/B3eLgAAqD4CPAAALiotK18pLy+ssfYX/L0vIR4AACdCgAcAwEVV9LyPTWmquBBfh7V7ILtQkxak1kjPPgAAqD4CPAAALi4uxJeecgAA6gEWsQMAAAAAwAXQAw8AAAAAVXQgu1DH8ktq7Xqh/l4OnS4F10SABwAAAIAqOJBdqP6vLFRRqbXWrunj6aZ59/ettRC/e/duJSYmau3aterYseN5vWby5MmaMGGCsrOzTa2jLiPAAwAAAEAVHMsvUVGp1eGLiJ5JxeKix/JLqny9ffv26cknn9TMmTOVlZWlmJgYXXnllXriiScUHh5+xtfFx8fr0KFDioiIOO9rXXfddRoyZEiV6kPVEOABAAAAoBqcfRHR33//XcnJyWrevLn++9//KjExUZs3b9YDDzygGTNmaPny5QoLCzvldSUlJfLy8lJ0dHSVrufr6ytfX4b51yQWsQMAAACAOmjs2LHy8vLS7Nmz1adPHzVq1EiDBw/W3LlzdeDAAT366KOSpISEBD377LMaPXq0goKCdPvtt2v37t2yWCxat26dvb0ff/xRzZo1k4+Pj1JSUvTJJ5/IYrHYh8xPnjxZISEh9vOfeuopdezYUZ999pkSEhIUHByskSNH6vjx4/ZzZs6cqV69eikkJETh4eEaNmyYdu3aVRvfHpdEgAcAAACAOubo0aOaNWuW7rrrrlN6xaOjo3XjjTfqq6++kmEYkqSXX35ZHTp00Nq1a/X444+f0l5aWpquueYaXXnllVq/fr3uuOMO+wcAZ7Nr1y5NnTpV06ZN07Rp07Ro0SL985//tD+fn5+v++67T6tWrdK8efPk5uamq666SlZr7a0v4EoYQg8AAAAAdczOnTtlGIZatWp12udbtWqlY8eO6fDhw5Kkfv366f7777c/v3v37krnv//++2rRooVeeuklSVKLFi20adMmPf/882etw2q1avLkyQoMDJQkjRo1SvPmzbO/bsSIEZXO/89//qMGDRpoy5Ytatu27fm/4XqCHngAAAAAqKMqetjP5aKLLjrr89u3b1fXrl0rHevWrds5201ISLCHd0mKiYlRZmam/fHOnTt1/fXXq0mTJgoKClJCQoIkae/evedVd31DgAcAAACAOqZp06ayWCzaunXraZ/funWrQkND1aBBA0mSv3/NLMbn6elZ6bHFYqk0PH748OE6evSo/v3vf2vFihVasWKFJNtCejgVAR4AAAAA6pjw8HBdeumleuedd1RYWFjpufT0dH3xxRe67rrrZLFYzqu9Fi1aaNWqVZWOrVy58oJqPHLkiLZv367HHntM/fv3tw/rx5kxBx4AAAAAquFAduG5TzLxOm+//bYuvvhiDRw4UM8991ylbeTi4uLOOX/9ZHfccYdeffVVPfjggxozZozWrVunyZMnS9J5fwjwZ6GhoQoPD9cHH3ygmJgY7d27Vw899FC12qovCPAAAAAAUAWh/l7y8XTTpAWptXZNH083hfp7Vek1zZo106pVq/Tkk0/q2muv1dGjRxUdHa0rr7xSTz755Gn3gD+TxMREffvtt7r//vv1xhtvKDk5WY8++qjuvPNOeXt7V/XtSJLc3Nz05Zdf6u6771bbtm3VokULvfnmm+rbt2+12qsPLMb5rmpQh+Xm5io4OFg5OTkKCgoyuxwAAM7LpgM5GvbWYr1wVTslRjhu7mJaVr4e+X6jpo3vpbZxwQ5rFwBcUVFRkdLS0pSYmCgfHx/78QPZhTqWX3vztEP9vRQX4nvuE2vR888/r/fee0/79u0zuxSndaafH6l6OZQeeAAAAACoorgQX6cL1DXtnXfeUdeu
XRUeHq4lS5bopZde0rhx48wuq14hwAMAAAAAzmnnzp167rnndPToUTVq1Ej333+/Hn74YbPLqlcI8AAAAACAc3rttdf02muvmV1GvcY2cgAAAAAAuABTA/y7776r9u3bKygoSEFBQUpOTtaMGTPszxcVFWns2LEKDw9XQECARowYoYyMjEpt7N27V0OHDpWfn58iIyP1wAMPqKysrLbfCgAAAAAANcrUAN+wYUP985//1OrVq7Vq1Sr169dPV1xxhTZv3ixJuvfee/XTTz/pm2++0aJFi3Tw4EFdffXV9teXl5dr6NChKikp0dKlS/XJJ59o8uTJeuKJJ8x6SwAAAAAA1AhT58APHz680uPnn39e7777rpYvX66GDRvqo48+0pQpU9SvXz9J0scff6xWrVpp+fLl6tGjh2bPnq0tW7Zo7ty5ioqKUseOHfXss8/qwQcf1FNPPSUvr9Pvk1hcXKzi4mL749zc3Jp7kwAAAAAAOIDTzIEvLy/Xl19+qfz8fCUnJ2v16tUqLS3VgAED7Oe0bNlSjRo10rJlyyRJy5YtU7t27RQVFWU/Z+DAgcrNzbX34p/OxIkTFRwcbL/Fx8fX3BsDAAAAAMABTF+FfuPGjUpOTlZRUZECAgL0/fffq3Xr1lq3bp28vLwUEhJS6fyoqCilp6dLktLT0yuF94rnK547k4cfflj33Xef/XFubi4hHgAAAMD5y94nFRypvev5hUshZJb6zvQA36JFC61bt045OTn69ttvdfPNN2vRokU1ek1vb295e3vX6DUAAAAA1FHZ+6RJXaXSwtq7pqevNHYlIf4CJCQkaMKECZowYYLD2uzbt686duyo119/3WFtno3pAd7Ly0tNmzaVJHXp0kUrV67UG2+8oeuuu04lJSXKzs6u1AufkZGh6OhoSVJ0dLR+++23Su1VrFJfcQ4AAAAAOFTBEVt4v+R+KbgWAnXOPunXV2zXPc8A/9577+mBBx7QsWPH5OFhi315eXkKDQ1Vz549tXDhQvu5CxcuVEpKilJTU5WUlHTGNivOO3bs2CkjpVE7TA/wf2a1WlVcXKwuXbrI09NT8+bN04gRIyRJ27dv1969e5WcnCxJSk5O1vPPP6/MzExFRkZKkubMmaOgoCC1bt3atPcAAAAAoB4IjpfCm5pdxWmlpKQoLy9Pq1atUo8ePSRJv/76q6Kjo7VixQoVFRXJx8dHkrRgwQI1atTorOHdkQzDUHl5uf2DhZpWUlJyxgXOXY2pi9g9/PDD+uWXX7R7925t3LhRDz/8sBYuXKgbb7xRwcHBGjNmjO677z4tWLBAq1ev1q233qrk5GT7D+Bll12m1q1ba9SoUVq/fr1mzZqlxx57TGPHjmWIPAAAAIB6q0WLFoqJiTmlp/2KK65QYmKili9fXul4SkqKPvvsM1100UUKDAxUdHS0brjhBmVmZkqSdu/erZSUFElSaGioLBaLbrnlFkm2TtiJEycqMTFRvr6+6tChg7799ttK7VssFs2YMUNdunSRt7e3Fi9erL59+2r8+PGaMGGCQkNDFRUVpX//+9/Kz8/XrbfeqsDAQDVt2lQzZsywt1VeXq4xY8bYr9WiRQu98cYbld77LbfcoiuvvFLPP/+8YmNj1aJFi9N+jz788EOFhIRo3rx5kqRNmzZp8ODBCggIUFRUlEaNGqWsrCz7+fn5+Ro9erQCAgIUExOjV155pRr/MhfG1ACfmZmp0aNHq0WLFurfv79WrlypWbNm6dJLL5Ukvfbaaxo2bJhGjBih3r17Kzo6Wt9995399e7u7po2bZrc3d2VnJysm266SaNHj9Yzzzxj1lsCAAAAAKeQkpKiBQsW2B8vWLBAffv2VZ8+fezHCwsLtWLFCqWkpKi0tFTPPvus1q9fr6lTp2r37t32kB4fH6///e9/kmwjow8dOmQPzhMnTtSnn36q9957T5s3b9a9996rm2666ZS1zR566CH985//1NatW9W+fXtJ0ieffKKIiAj99ttvGj9+vO6880793//9ny6++GKtWbNGl112mUaNGqWCggJJtg8LGjZsqG+++UZbtmzRE088oUceeURff/11pWvNmzdP27dv15w5czRt2rRTvjf/+te/9NBDD2n27Nnq37+/srOz1a9fP3Xq1EmrVq3SzJkzlZGRoWuvvdb+mgceeECLFi3SDz/8oNmzZ2vhwoVas2bNhfwTVZmpQ+g/+uijsz7v4+OjSZMmadKkSWc8p3Hjxpo+fbqjSwMAAAAAl5aSkqIJEyaorKxMhYWFWrt2rfr06aPS0lK99957kmxbcxcXFyslJUWNGjWyv7ZJkyZ688031bVrV+Xl5SkgIEBhYWGSpMjISPsc+OLiYr3wwguaO3eufapzkyZNtHjxYr3//vvq06ePvc1nnnnG3llboUOHDnrsscck2UZo//Of/1RERIRuu+02SdITTzyhd999Vxs2bFCPHj3k6empp59+2v76xMRELVu2TF9//XWlsO3v768PP/zwtEPnH3zwQX322WdatGiR2rRpI0l6++231alTJ73wwgv28/7zn/8oPj5eO3bsUGxsrD766CN9/vnn6t+/vyTbhw8NGzasyj/JBXO6OfAAAAAAgAvXt29f5efna+XKlTp27JiaN2+uBg0aqE+fPrr11ltVVFSkhQsXqkmTJmrUqJFWr16tp556SuvXr9exY8dktVolSXv37j3jGmOpqakqKCg4JZiXlJSoU6dOlY5ddNFFp7y+oideso2wDg8PV7t27ezHKrYJrxjKL0mTJk3Sf/7zH+3du1eFhYUqKSlRx44dK7Xbrl2704b3V155Rfn5+Vq1apWaNGliP75+/XotWLBAAQEBp7xm165d9ut0797dfjwsLOyMw/NrCgEeAAAAAOqgpk2bqmHDhlqwYIGOHTtm7w2PjY1VfHy8li5dqgULFqhfv37Kz8/XwIEDNXDgQH3xxRdq0KCB9u7dq4EDB6qkpOSM18jLy5Mk/fzzz4qLi6v03J/XJfP39z/l9Z6enpUeWyyWSscsFosk2T9M+PLLL/X3v/9dr7zyipKTkxUYGKiXXnpJK1asOOe1JOmSSy7Rzz//rK+//loPPfRQpfcxfPhwvfjii6e8JiYmRqmpqadtr7YR4AEAAACgjkpJSdHChQt17NgxPfDAA/bjvXv31owZM/Tbb7/pzjvv1LZt23TkyBH985//VHy8bau6VatWVWqroke7vLzcfqx169by9vbW3r17Kw2XrylLlizRxRdfrLvuust+bNeuXef9+m7dumncuHEaNGiQPDw89Pe//12S1LlzZ/3vf/9TQkLCaVfHT0pKkqenp1asWGGfanDs2DHt2LGjVt53BQI8AAAAAFRHzj6nv05KSorGjh2r0tLSSkGzT58+GjdunEpKSpSSkiIPDw95eXnprbfe0t/+9jdt2rRJzz77bKW2GjduLIvFomnTpmnIkCHy9fVVYGCg/v73v+vee++V1WpVr169lJOToyVLligoKEg333xztWs/nWbNmunTTz/VrFmzlJiYqM8++0wrV65UYmLiebd
x8cUXa/r06Ro8eLA8PDw0YcIEjR07Vv/+9791/fXX6x//+IfCwsKUmpqqL7/8Uh9++KECAgI0ZswYPfDAAwoPD1dkZKQeffRRubnV7rrwBHgAAAAAqAq/cMnTV/q1FrcR8/S1XbeKUlJSVFhYqJYtW9rnk0u2AH/8+HH7dnOSNHnyZD3yyCN688031blzZ7388su6/PLL7a+Ji4vT008/rYceeki33nqrRo8ercmTJ+vZZ59VgwYNNHHiRP3+++8KCQlR586d9cgjj1z4+/6TO+64Q2vXrtV1110ni8Wi66+/XnfddVelrebOR69evfTzzz9ryJAhcnd31/jx47VkyRI9+OCDuuyyy1RcXKzGjRtr0KBB9pD+0ksv2YfaBwYG6v7771dOTo7D3+PZWAzDMGr1ik4oNzdXwcHBysnJUVBQkNnlAABwXjYdyNGwtxbrhavaKTHi9HP9qiMtK1+PfL9R08b3Utu4YIe1CwCuqKioSGlpaUpMTJSPj88fT2TvkwqO1F4hfuFSSHztXQ8OccafH1Uvh9IDDwAAAABVFRJPoEatq90B+wAAAAAAoFoI8AAAAAAAuAACPAAAAAAALoAADwAAAADnwNrfqA5H/9wQ4AEAAADgDDw9PSVJBQUFJlcCV1Txc1Pxc3ShWIUeAAAAAM7A3d1dISEhyszMlCT5+fnJYrGYXBWcnWEYKigoUGZmpkJCQuTu7u6QdgnwAAAAAHAW0dHRkmQP8cD5CgkJsf/8OAIBHgAAAADOwmKxKCYmRpGRkSotLTW7HLgIT09Ph/W8VyDAAwAAAMB5cHd3d3ggA6qCRewAAAAAAHABBHgAAAAAAFwAAR4AAAAAABdAgAcAAAAAwAUQ4AEAAAAAcAEEeAAAAAAAXAABHgAAAAAAF0CABwAAAADABRDgAQAAAABwAQR4AAAAAABcAAEeAAAAAAAXQIAHAAAAAMAFEOABAAAAAHABBHgAAAAAAFwAAR4AAAAAABdAgAcAAAAAwAUQ4AEAAAAAcAEEeAAAAAAAXAABHgAAAAAAF0CABwAAAADABRDgAQAAAABwAQR4AAAAAABcAAEeAAAAAAAXQIAHAAAAAMAFEOABAAAAAHABBHgAAAAAAFwAAR4AAAAAABdAgAcAAAAAwAUQ4AEAAAAAcAEEeAAAAAAAXAABHgAAAAAAF0CABwAAAADABRDgAQAAAABwAQR4AAAAAABcAAEeAAAAAAAXQIAHAAAAAMAFEOABAAAAAHABBHgAAAAAAFwAAR4AAAAAABdAgAcAAAAAwAUQ4AEAAAAAcAEEeAAAAAAAXAABHgAAAAAAF0CABwAAAADABRDgAQAAAABwAQR4AAAAAABcAAEeAAAAAAAXQIAHAAAAAMAFEOABAAAAAHABBHgAAAAAAFwAAR4AAAAAABdAgAcAAAAAwAUQ4AEAAAAAcAEEeAAAAAAAXAABHgAAAAAAF0CABwAAAADABRDgAQAAAABwAQR4AAAAAABcAAEeAAAAAAAXQIAHAAAAAMAFEOABAAAAAHABBHgAAAAAAFwAAR4AAAAAABdAgAcAAAAAwAUQ4AEAAAAAcAEEeAAAAAAAXICpAX7ixInq2rWrAgMDFRkZqSuvvFLbt2+vdE7fvn1lsVgq3f72t79VOmfv3r0aOnSo/Pz8FBkZqQceeEBlZWW1+VYAAAAAAKhRHmZefNGiRRo7dqy6du2qsrIyPfLII7rsssu0ZcsW+fv728+77bbb9Mwzz9gf+/n52e+Xl5dr6NChio6O1tKlS3Xo0CGNHj1anp6eeuGFF2r1/QAAAAAAUFNMDfAzZ86s9Hjy5MmKjIzU6tWr1bt3b/txPz8/RUdHn7aN2bNna8uWLZo7d66ioqLUsWNHPfvss3rwwQf11FNPycvLq0bfAwAAAAAAtcGp5sDn5ORIksLCwiod/+KLLxQREaG2bdvq4YcfVkFBgf25ZcuWqV27doqKirIfGzhwoHJzc7V58+bTXqe4uFi5ubmVbgAAAAAAODNTe+BPZrVaNWHCBPXs2VNt27a1H7/hhhvUuHFjxcbGasOGDXrwwQe1fft2fffdd5Kk9PT0SuFdkv1xenr6aa81ceJEPf300zX0TgAAAAAAcDynCfBjx47Vpk2btHjx4krHb7/9dvv9du3aKSYmRv3799euXbuUlJRUrWs9/PDDuu++++yPc3NzFR8fX73CAQAAAACoBU4xhH7cuHGaNm2aFixYoIYNG5713O7du0uSUlNTJUnR0dHKyMiodE7F4zPNm/f29lZQUFClGwAAAAAAzszUAG8YhsaNG6fvv/9e8+fPV2Ji4jlfs27dOklSTEyMJCk5OVkbN25UZmam/Zw5c+YoKChIrVu3rpG6AQAAAACobaYOoR87dqymTJmiH374QYGBgfY568HBwfL19dWuXbs0ZcoUDRkyROHh4dqwYYPuvfde9e7dW+3bt5ckXXbZZWrdurVGjRqlf/3rX0pPT9djjz2msWPHytvb28y3BwAAAACAw5jaA//uu+8qJydHffv2VUxMjP321VdfSZK8vLw0d+5cXXbZZWrZsqXuv/9+jRgxQj/99JO9DXd3d02bNk3u7u5KTk7WTTfdpNGjR1faNx4AAAAAAFdnag+8YRhnfT4+Pl6LFi06ZzuNGzfW9OnTHVUWAAAAAABOxykWsQMAAAAAAGdHgAcAAAAAwAUQ4AEAAAAAcAEEeAAAAAAAXAABHgAAAAAAF0CABwAAAADABRDgAQAAAABwAQR4AAAAAABcAAEeAAAAAAAXQIAHAAAAAMAFEOABAAAAAHABBHgAAAAAAFwAAR4AAAAAABdAgAcAAAAAwAUQ4AEAAAAAcAEEeAAAAAAAXAABHgAAAAAAF0CABwAAAADABRDgAQAAAABwAQR4AAAAAABcAAEeAAAAAAAXQIAHAAAAAMAFEOABAAAAAHABBHgAAAAAAFwAAR4AAAAAABdAgAcAAAAAwAUQ4AEAAAAAcAEEeAAAAAAAXAABHgAAAAAAF0CABwAAAADABRDgAQAAAABwAQR4AAAAAABcAAEeAAAAAAAXQIAHAAAAAMAFEOABAAAAAHABBHgAAAAAAFwAAR4AAAAAABdAgAcAAAAAwAUQ4AEAAAAAcAEEeAAAAAAAXAABHgAAAAAAF0CABwAAAADABRDgAQAAAABwAQR4AAAAAABcAAEeAAAAAAAXQIAHAAAAAMAFEOABAAAAAHABBHgAAAAAAFwAAR4AAAAAABdAgAcAAAAAwAUQ4AEAAAAAcAEEeAAAAAAAXAABHgAAAAAAF0CABwAAAADABRDgAQAAAABwAQR4AAAAAABcAAEeAAAAAAAXQIAHAAAAAMAFEOABAAAAAHABBHgAAAAAAFwAAR4AAAAAABfgYXYBAAAA9cqRXVLxcce26R0ohSc5tk0AgNMhwAMAANSWI7uktzrXTNvj1xDiAaCOI8ADAADUloqe90vul4LjHdNmzj7p11cc36sPAHA6BHgAAIDaFhwvhTc1uwoAgIthETsAAAAAAFwAAR4AAAAAABfAEHoAAEyy/1iBCkvK5e
vlLn8vD/l6ucvbw00Wi8Xs0gAAgBMiwAMAUItyCkr144aD+nb1fq3fl33K8/Fhvrr9kib6v4vi5ePpXvsFAgAAp0WABwCgFuw5kq9/zdquOZszVFJulSS5u1kU5OOhgpJyFZfZju07WqjHf9ist+an6vbeTXRD90by8+LXNQAAIMADAFDjZm5K1wPfrNfx4jJJUsvoQF3TpaGu6BinBoHekqSycqvyisv04/qDem/hLh3MKdJzP2/Vuwt36Z0bO6t7k3Az3wIAAHACBHgAAGpIablV/5q5Tf/+NU2S1DomSLf3bqKkBv6yWCzKyC1SRm5Rpdd0bhSqSTd21oJtmfp61X6l5xZp1Ee/6eEhLdU1IazSuamZebX2XgAAgPkI8AAA1ICM3CKNm7JGK3cfsx/bcihXE75aV+W2SsqtevqnLWd83seTTWUAAKgPCPAAADjYkbxijfxgudKy8hXo7aHx/ZrqhRnbNDalqeJCfKvUVrnV0Ler92n9/hxZJF3eMVbdE/8YTu/j6aaY4Kq1Wecc2SUVH3d8u96BUniS49sFAKCaCPAAADhQQUmZ/vLJKqVl5SsuxFef/7W78k/MfY8L8VVihH+V2/zHoJaavHS35mzJ0A/rDioiwFsDWkU5unTXdGSX9Fbnmmt//BpCPADAaRDgAQBwkLJyq8ZPWav1+7IV4uepT/7STYkR/tp0IOeC2nWzWHTrxQny9XTXj+sP6tNlu9UsMkCNw6v+YUCdU9Hzfsn9UnC849rN2Sf9+krN9OwDAFBNBHgAABzAMAw9/sMmzduWKW8PN304+iI1jQxwWPsWi0Uju8Zr/7ECrdmbrTfn79TzV7Zjr/gKwfFSeFOzqwAAoEax6g0AAA7w1vxU/fe3fXKzSG9e30kX/WnFeEewWCy6o0+SQvw8dTC7SJ8t3+PwawAAAOdFgAcA4AIt3ZWlV+fskCQ9fUVbDWwTXWPXCvLx1Ni+TWWRNH9bppb/fqTGrgUAAJwLAR4AgAuQW1SqB77ZIEm6vlu8RvVoXOPXbBsXrCs6xkqS/v3r7zp8vOgcrwAAAHUBc+ABAPiTtKx8+8rx5/LanB06kF2o6CAfXd2p4WkXrEvNzHN0iRrRpaE2H8zVzsw8vbtolx4f2loWi8Xh1wEAAM6DAA8AwEnSsvKV8vLCKr8uPbdI//f+srOe4+PpuIFvHm5uGpfSVA98u0FbDx3Xqj3H1LUG5t0DAADnQYAHAOAkFT3vY1OaKi7E94znHS8q1RvzdqqgpFy9mzXQoLZnn/fu4+mmmOAzt1cdkUE+GtIuRlPXHdCUFXvVKT5EHu7MjgMAoK4y9bf8xIkT1bVrVwUGBioyMlJXXnmltm/fXumcoqIijR07VuHh4QoICNCIESOUkZFR6Zy9e/dq6NCh8vPzU2RkpB544AGVlZ3f0EcAAE4nLsRXiRH+p70lhPtp1uYMFZSUq1GYn/56SeIZz624OTq8V7i8Q6yCfD2VnlukuVsza+QaAADAOZga4BctWqSxY8dq+fLlmjNnjkpLS3XZZZcpPz/ffs69996rn376Sd98840WLVqkgwcP6uqrr7Y/X15erqFDh6qkpERLly7VJ598osmTJ+uJJ54w4y0BAOqBFWlHtWbvMXm4WXRX3yR5mtjr7evlrv/r0lCS9L81+8977j4AAHA9pg6hnzlzZqXHkydPVmRkpFavXq3evXsrJydHH330kaZMmaJ+/fpJkj7++GO1atVKy5cvV48ePTR79mxt2bJFc+fOVVRUlDp27Khnn31WDz74oJ566il5eXmZ8dYAAHVUSZlVX6yw7b9+ecdYNQ73N7kiKaVFpGZuSteB7EJNXXdAN3av+ZXwAQBA7XOqiXI5ObaVe8PCbIvwrF69WqWlpRowYID9nJYtW6pRo0Zatsy2UNCyZcvUrl07RUVF2c8ZOHCgcnNztXnz5tNep7i4WLm5uZVuAACcj+kbDykrr0Rh/l66vEOs2eVIktzdLLqheyNJ0sxN6WwrBwBAHeU0Ad5qtWrChAnq2bOn2rZtK0lKT0+Xl5eXQkJCKp0bFRWl9PR0+zknh/eK5yueO52JEycqODjYfouPj3fwuwEA1EVH80s0dd0BSdL13RrJ28Pd5Ir+0Ck+RG1ig1RmNfTlyn1mlwMAAGqA0wT4sWPHatOmTfryyy9r/FoPP/ywcnJy7Ld9+/hDBwBwbl+v2qfiMquaRQaoZ1K42eVUYrFYdFOPxrJIWrrriPYdLTC7JAAA4GBOEeDHjRunadOmacGCBWrYsKH9eHR0tEpKSpSdnV3p/IyMDEVHR9vP+fOq9BWPK875M29vbwUFBVW6AQBwNrsO52nRjsOSpNHJjWWxWEyu6FQJ4f72veB/3njI5GoAAICjmRrgDcPQuHHj9P3332v+/PlKTEys9HyXLl3k6empefPm2Y9t375de/fuVXJysiQpOTlZGzduVGbmH1vnzJkzR0FBQWrdunXtvBEAQJ1mGIY+W2ZbuK5X0wg1jQw0uaIzG9Y+RpK0ODVLR/NLTK4GAAA4kqkBfuzYsfr88881ZcoUBQYGKj09Xenp6SosLJQkBQcHa8yYMbrvvvu0YMECrV69WrfeequSk5PVo0cPSdJll12m1q1ba9SoUVq/fr1mzZqlxx57TGPHjpW3t7eZbw8AUEesSDuq7RnH5e3hppFdnXvdlGZRgWoRFahyq6GZm+iFBwCgLjE1wL/77rvKyclR3759FRMTY7999dVX9nNee+01DRs2TCNGjFDv3r0VHR2t7777zv68u7u7pk2bJnd3dyUnJ+umm27S6NGj9cwzz5jxlgAAdYzVMPTt6v2SpKHtYxQe4PwfDg/rYOuFn7s1UwUl7AsPAEBdYeo+8IZhnPMcHx8fTZo0SZMmTTrjOY0bN9b06dMdWRoAAJKk39KO6kB2ofy83DWkbYzZ5ZyXzo1CFRvio4PZRZq/LVPD2jvHdncAAODCOMUidgAAOCOrYei7Nbbe98Fto+Xvbern3ufNzWLR0Ha20D5jU7rKrFaTKwIAAI5AgAcA4AxW7j6qfccK5evprkEu0vteoVfTCAX7eupofomW7TpidjkAAMABCPAAAJyGrff9gCRb73uAi/S+V/DycNOgNrbtVKdtOHRe09YAAIBzI8ADAHAa2w7lau/RAvl6umuwi/W+VxjQOkreHm7ae7RAmw7mml0OAAC4QAR4AABOY/62TEnSwDZRCvBxrd73CgHeHurTvIEkad7WDJOrAQAAF4oADwDAaRzMKZKPp5uGtHPN3vcK/VpGSpJW7Tmm3MJSk6sBAAAXggAPAMAZXNY6WoE+nmaXcUEah/srqYG/yq2Gftl52OxyAADABSDAAwBwkm3ptrni7m4WDW4bbXI1jpFyohd+wbZMFrMDAMCFEeABADjJD+sOSpI6NgxRiJ+XydU4xsVNIuTt4aaDOUXaln7c7HIAAEA1EeABADjhYHahlqRmSZIubhpucjWO4+vlrouTbO+nYnE+AADgegjwAACc8Mmy3bKeGGEeE+xrbjEOVrGY3Yq0I8orLjO5GgAAUB0Ee
AAAJBWUlOm/K/aaXUaNSWoQoPgwP5WWG1q8M8vscgAAQDUQ4AEAkPS/1fuVW1SmmGAfs0upERaLRf1a2Hrh529nMTsAAFwRAR4AUO9ZrYY+XrJbknR5h1hzi6lBvZpFyNPdon1HC7TrcL7Z5QAAgCoiwAMA6r1FOw7r96x8Bfp4aECrKLPLqTEB3h7qnmhbzG7hdhazAwDA1RDgAQD13keL0yRJI7vGy9fL3eRqalaf5g0kScvTjqis3GpyNQAAoCoI8ACAeu33w3lanJolN4s0OjnB7HJqXOuYIIX4eiq/uFzr9+eYXQ4AAKgCAjwAoF77auU+SVLfFpGKD/MzuZqa5+ZmUfKJPeEr9rwHAACugQAPAKi3Ssqs+nb1fkm24fP1Rc+mEZKk1XuOqbCk3ORqAADA+SLAAwDqrTlbMnQkv0SRgd7q1zLS7HJqTZMIf0UH+aik3KpVe46aXQ4AADhPBHgAQL315cq9kqRrL4qXh3v9+ZVosVjsvfAMowcAwHXUn79WAAA4yd4jBfp1py28XlePhs9X6NnUNg9+44Ec5RSWmlwNAAA4HwR4AEC99NUqW+/7Jc0i6sXidX8WE+yrpAb+shrS8t+PmF0OAAA4DwR4AEC9U1pu1derbIvXXd+tkcnVmOfiJIbRAwDgSgjwAIB6Z/62TB0+XqyIAC8NaBVldjmmSU4Kl8Ui7czMU0ZukdnlAACAcyDAAwDqnS9/sw2fH9Globw86u+vwlA/L7WJDZYkLd3FMHoAAJxd/f2rBQBQLx3MLtTCHYclSSO71t/h8xV6JtkWs2MYPQAAzo8ADwCoV75fe0CGIXVPDFNihL/Z5Ziua0KY3N0sOpBdqAPZhWaXAwAAzoIADwCoNwzD0PdrD0iSRnRuaHI1zsHf20Pt4mzD6H9LO2pyNQAA4GwI8ACAemPTgVylZubJ28NNg9pFm12O0+iWGCZJWpHGPHgAAJwZAR4AUG9U9L4PaB2lIB9Pk6txHhc1DpWbRdpzpIDV6AEAcGIEeABAvVBWbtWP6w9Kkq7uFGdyNc4l0MdTrU+sRr+CYfQAADgtAjwAoF74NTVLWXnFCvP3Uu/mDcwux+l0S7ANo/+NYfQAADgtAjwAoF74fo1t+Pzw9jHydOfX3591TQiVRdKuw/k6VlBidjkAAOA0+AsGAFDn5RWXafaWdEnSVaw+f1ohfl5qGRMoSdp8MNfkagAAwOkQ4AEAdd7MTekqKrWqSYS/OjQMNrscp9UtIVyStPlAjsmVAACA0yHAAwDqvO/X7pckXdUpThaLxeRqnFfFdnJ7jhaYXAkAADgdAjwAoE47lFOopbtsC7NdyerzZxXm76VmkQFmlwEAAM6AAA8AqNN+XHdQhmFbpC0+zM/scpxe98Rws0sAAABnQIAHANRpP288JEm6vCO97+ejYhi9JOUUlppYCQAA+DMCPACgztp7pEAb9ufIzSINbhttdjkuoUGgt2JDfCRJv6UdNbkaAABwMgI8AKDOquh9T04KV0SAt8nVuI5WMUGSpBW/HzG5EgAAcLJqBfgmTZroyJFTf6lnZ2erSZMmF1wUAACOMP1EgB/SLsbkSlxL6xMBfu2+bBWWlJtcTR1lLZdyD0rHdktZO6SMTVL6Bik/SzIMs6sDADgpj+q8aPfu3SovP/UXenFxsQ4cOHDBRQEAcKH2HMnXxgO24fOD2jB8viqig2xD6IvLrPp152FdxvfvwhmGlLVT2vSt7fGsR6Ty4tOf6x0khSVKoYlSbCcppqPk5l5rpQIAnFeVAvyPP/5ovz9r1iwFBwfbH5eXl2vevHlKSEhwWHEAAFRXxfD5i5MiFM7w+SqxWCz2+3O2ZBDgL4RhSFt+kBZOlA5v++N4ebHk4S15+ErunpK7l2RYpbwMqThXOrTedtsyVfINk5r0lZL6SaEJJr0RAIAzqFKAv/LKKyXZfrHffPPNlZ7z9PRUQkKCXnnlFYcVBwBAdf28wRbgh7Zn+PyFmLctU+VWQ+5ulnOfjD8YhrRrvjTvGenQOtsxd28puq10YLV0yd+lxN6S5U+zGcuKpZx90tHfbUPr9yyVCo9Km7+z3SJaSB1vkGI7Sxb+TQCgvqlSgLdarZKkxMRErVy5UhERETVSFAAAFyItK1+bD+bK3c2igfQeV1uAt4eO5pdo9Z5jlbaXwzlk75V+GCul/WJ77BUgJY+Tku+SjqZJH/SRghueGt4lW698eFPbrdllUrc7bIF/1zxp/0opa7s090kpso3UeZQU1bZ23xsAwFTVmgOflpbm6DoAAHCY6fbh8+EK8/cyuRrX1TUhVAu2H9acLekE+PP1+0Lpm1ttvebu3lLXv0qX3Cf5V7PTw91TatTDdivMljZ9I22bLmVulmY+JMV1kZoNdOQ7AAA4sWoFeEmaN2+e5s2bp8zMTHvPfIX//Oc/F1wYAADVNe3E8PlhDJ+/IN2bhGvB9sOavSVDjwxpVWluPP7EMKSlb0pzn7LNZY/pKF37iWPnrPuGSF1vk1pfJW34Sto529Y7f2i97fnyUsddCwDglKq1jdzTTz+tyy67TPPmzVNWVpaOHTtW6QYAgFl+P5ynrYdy5eFm0WWtGT5/ITo3CpGXh5v2HCnQzsw8s8txXiX50re3SnOesIX3jjdKf5lZcwvO+UdIyWOlK9+zzYW3ltmOf3ebtO+3mrkmAMApVKsH/r333tPkyZM1atQoR9cDAMAFqRg+37NphEIZPn9B/Lw81DMp/MQw+gw1jwo0uyTnU1ooTblO2v2r5OYpDf6ndNGY2llgLihGGvC0tPFrae1n0rE06aPLbOG+/xO2+fQAgDqlWj3wJSUluvjiix1dCwAAF2z6xnRJ0tB2DJ93hEtPjGKYvSXD5EqcUFmJ9PVoW3j3CpRu/sk25702pxpYLLZ58JLUfJAkQ1r2tvTv/lLmtrO+FADgeqoV4P/6179qypQpjq4FAIALsu9ogbYcypWbRRrQOsrscuqEAa0iJUnr92UrI7fI5GqciLXcNmR952zbXu43fi01Tja3pr4PS9d/KfmFSxkbbavd//Zv2/x8AECdUK0h9EVFRfrggw80d+5ctW/fXp6enpWef/XVVx1SHAAAVVHRS9wtMYzV5x0kMshHnRqFaO3ebM3dmqEbuzc2uyTzWa3Sj3dLW6bahs2P/Fxq7CQjE1sMlu5cJk2907b13PS/2/ajv/Jd2yJ4AACXVq0e+A0bNqhjx45yc3PTpk2btHbtWvtt3bp1Di4RAIDzM2uzbfg8i9c51oBWttEM87dmmlyJk5j7pLTuc8niLl3zH6npALMrqiwwSrrxW2nQi5K7l7R9uvRBX+nQBrMrAwBcoGr1wC9YsMDRdQAAcEGO5BVr1e6jkqTL2jB83pH6t4rUS7O2a3FqlgpLyuXr5W52SebZ8oNtuzhJumKS1Ppyc+s5Ezc3qcffpPhu0tc3n1jg7lJp6KtSpxvNrg4AUE3V6oEHAMDZzNuaKashtYkNUsNQP7PLqVNaRAUqLsRXxWVWLUnNMrsc8xzZJU0da7vf8x6p4/Xm1nM+4jpLdyySml4q
lRVJP9xlG/5fynoGAOCKqtUDn5KSIstZVlidP39+tQsCAKA6Zm+xDZ8f2Ibh845msVjUv1WkPl22R/O2ZdTPBQJLC20rzpcclxr3lPo9YXZF588vTLrha+nXl6UFL0hrPpEOrZeu/VQKZU0DAHAl1eqB79ixozp06GC/tW7dWiUlJVqzZo3atWvn6BoBADir/OIy/bLT1jPM8Pma0f/EPPh5WzNl1MdVzaf/XcrYJPk3sM17d69WH4h53NykPv+Qbvqf5BsmHVonvd9b2jnH7MoAAFVQrd8+r7322mmPP/XUU8rLy7ugggAAOF9pWfnKLy7TktQslZRZFRPso9IyqzYdyKl2m6mZ/B47nR5NwuTn5a7M48XadCBX7RoGm11S7Vn7ue1mcZNGfCQFuvAoj6b9pTt+kb65WTqwWvri/2zBvs9DtpAPAHBqDv34+KabblK3bt308ssvO7JZAABOkZaVr5SXF1Y6diinSMPfXuKQ9n08CTMn8/Zw1yXNIjRrc4bmbs2oPwE+94D0899t91MekZr0MbceRwiJl26dIc16RFr5obToRdsK9Vd/IPkEmV0dAOAsHBrgly1bJh8fH0c2CQDAaeUXl0mS/tYnSZOXpqmo1KrbL2mihAj/C27bx9NNMcG+F9xOXdO/VZRmbc7QvG0ZuvfS5maXUzt+eVkqK5QSLpF63W92NY7j4S0NfUVq2FX66R5pxwzpwwHS9f+VwpPMrg4AcAbVCvBXX311pceGYejQoUNatWqVHn/8cYcUBgDA+Sgps6qo1KogX0/1ad5Abm5nXmQVF6Zfy0hZLNKmA7lKzylSdHA9+ND+4BrJw1e6/K26OcS8w0gporn05Y1S1nbp3ynOubc9AEBSNRexCw4OrnQLCwtT3759NX36dD355JOOrhEAgDPafNA2371Lo1DCew2LCPBWx/gQSdL8bZnmFlPTik5aR6Hfo1JYonm11LS4ztLtC6X47rb3/cX/SUvelOrjYoUA4OSq1QP/8ccfO7oOAACqZeuhXElS14RQkyupH/q3jNTavdmatzVDN3RvZHY5NcMwpI3f2u43aCl1v9PcempDYJR080/Sz/dLaz+T5jwupW+ULn9T8mQ6CQA4iwsaC7Z69Wp9/vnn+vzzz7V27VpH1QQAwHnLLSqTt4eb2sTWk0XVTFaxndzi1CwVlpSbXE0N2bNEythou9/7H663ZVx1eXjbpgoMfkmyuEsbv5b+M0jKOWB2ZQCAE6oV4DMzM9WvXz917dpVd999t+6++2516dJF/fv31+HDhx1dIwAAZ9W+YbC8POrg/GQn1DI6UHEhvious2rpriyzy3G84jxpxXt/PK5vC7pZLFL326XRU//YL/6DvtLeFSYXBgCQqhngx48fr+PHj2vz5s06evSojh49qk2bNik3N1d33323o2sEAOCsOjVi+HxtsVgs6t8qUpI0d2sdnAe/fopUlC0FRJpdibkSe0u3L5Ci2kr5mdInw/6YVgAAME21AvzMmTP1zjvvqFWrVvZjrVu31qRJkzRjxgyHFQcAwJkczS+x3+90YmE11I5+LW3hdv62DBl1aaGznH3Stp9t99uMMLcWZxCaIP1lltRymFReIv1vjLToJRa3AwATVSvAW61WeXp6nnLc09NTVqv1gosCAOBcVu4+KklqGOqrED8vk6upX3o0CZefl7sycou16UCu2eU4zqr/SEa51LCb1KCF2dU4B+8A6dpPpeRxtscLnpOm3iWVlZz9dQCAGlGtAN+vXz/dc889OnjwoP3YgQMHdO+996p///4OKw4AgDNZmWYL8C2jA02upP7x8XTXJc0iJElzt2aYXI2DHFgj7V9pW7ztojFmV+Nc3Nylgc9LQ1+1fX/WT5E+v7ryVnsAgFpRrQD/9ttvKzc3VwkJCUpKSlJSUpISExOVm5urt956y9E1AgBQSVFpudbuy5YktYwOMreYeqpiNfo6sR+8tVxa9aHtfsthUnCcufU4q65jpBu/kbyDpN2/Sh8PlY6nm10VANQr1doXJT4+XmvWrNHcuXO1bds2SVKrVq00YMAAhxYHAMDpLPv9iIrLbFO2YoJ9TK6mfkppESmLRdp4IEcZuUWKCnLhf4cds6TsvZJ3oNTherOrcW5N+0u3/Cx9PsK21d5Hl0mjvq9/q/UDgEmq1AM/f/58tW7dWrm5ubJYLLr00ks1fvx4jR8/Xl27dlWbNm3066+/1lStAABIkuaftPq5xWIxsZL6q0Ggtzo0DJEkzXPl1ehL8qR1n9vud7zRNucbZxfTXhozWwpNlLL3SP8ZKB1cZ3ZVAFAvVCnAv/7667rtttsUFHTqcMXg4GDdcccdevXVVx1WHAAAf2YYhubVlXnXLm7Aie3kXPrfY+M3UnGuFNxIaj7Y7GpcR1iiLcRHt5fyD0uTh0l7lpldFQDUeVUK8OvXr9egQYPO+Pxll12m1atXX3BRAACcybb04zqYUyRvj2ot4wIHqpgHvzg1S4Ul5SZXUw0FR6Wt02z3u9xiW6wN5y8g0jacPuESqeS4bVg9IR4AalSV/vrJyMg47fZxFTw8PHT48OELLgoAgDOp6O3twN7vpmsZHajYYB8Vl1m1dFeW2eVU3cavpfJi25ZxDbuaXY1r8gmyLWzXpK9Umn8ixC81uyoAqLOqFODj4uK0adOmMz6/YcMGxcTEXHBRAACcybwTq553SwgzuRJYLBZ7L/xcV5sHn5cp7Zhpu99ptMRaCtXn6Std/6XUJOVEiL9G2r3Y7KoAoE6q0ir0Q4YM0eOPP65BgwbJx6fyarOFhYV68sknNWzYMIcWCABAhay8Yq07sX3cRQmh5hYDSVL/VpH6bPkezd+WIcNo6zqLCm74UrKW2eZwx3Q483lZOxx7XUe35yw8faXr/yt9eYO0a770xf9JN/1Panyx2ZUBQJ1SpQD/2GOP6bvvvlPz5s01btw4tWjRQpK0bds2TZo0SeXl5Xr00UdrpFAAABZtPyzDkNrEBikiwNvsciCpR5Nw+Xm5KyO3WJsP5qptXLDZJZ1b7gEpda7tfqdRpz/H09f29bvbaqaGivbrEk9faeQU6csbpV3zpCkjpb/MkKLamF0ZANQZVQrwUVFRWrp0qe688049/PDDMgxDkm0I3cCBAzVp0iRFRUXVSKEAACzYbhumndIi0uRKUMHH0129mkZo9pYMzd2a4RoBft0UybDa5r1Htjr9OUFx0lXvS6WFjr++p6+t/brI01ca+YX02VXS3mXSZ1ef2HKusdmVAUCdUKUAL0mNGzfW9OnTdezYMaWmpsowDDVr1kyhoQxlBADUnLJyq37daVsorW+LBiZXg5MNaBWl2VsyNG9rpiYMaG52OWd3bLeU9ovtfsebzn5uXQ3ZNa1iOP1/BkuHt0qfXy39ZbbkH252ZQDg8qq9B09oaKi6du2qbt26Ed4BADVu/f5s5RSWKtjXUx1Zgd6ppLSMlMUibTyQo4zcIrPLObv1UyQZUuOeUniS2dXUXb6h0qjvpOB46UiqNOX/pOI8s6sCAJdn6ia6v/zyi4YPH67Y2FhZLBZ
NnTq10vO33HKLLBZLpduf96E/evSobrzxRgUFBSkkJERjxoxRXh6/IACgrlmwzbZN6SXNIuThzh7wzqRBoLc6NAyRJM3f5sSr0R/bc2KLM4vU4Qazq6n7gmKlm76zhfkDq6Vv/yJZy82uCgBcmql/AeXn56tDhw6aNGnSGc8ZNGiQDh06ZL/997//rfT8jTfeqM2bN2vOnDmaNm2afvnlF91+++01XToAoJYt3MH8d2fWv6Xt32Xe1gyTKzmLjV/bvjZOZk52bWnQXLrxW8nDR9o5S5r/nNkVAYBLq/IceEcaPHiwBg8efNZzvL29FR0dfdrntm7dqpkzZ2rlypW66KKLJElvvfWWhgwZopdfflmxsbEOrxkAUPsyjxdp04FcSVLv5sx/d0b9W0XplTk7tDg1S0Wl5fLxdDe7pMpyD0i7f7Xdb3edubXUNw0vki5/W/rur9LiV22r0re7xuyqAMAlOf0YxIULFyoyMlItWrTQnXfeqSNHjtifW7ZsmUJCQuzhXZIGDBggNzc3rVix4oxtFhcXKzc3t9INAOC8Fm23DZ9v3zBYDQLZPs4ZtYoJVGywj4pKrVqSmmV2Oafa+O0fK88z9732tf8/qec9tvs/jJMOrjO1HABwVU4d4AcNGqRPP/1U8+bN04svvqhFixZp8ODBKi+3zZ9KT09XZGTloZQeHh4KCwtTenr6GdudOHGigoOD7bf4+PgafR8AgAuzcIctwPel991pWSwW9W9l20p2nrPNg8/LlHbNt91vd625tdRn/Z+Umg6Qygpte8XnHTa7IgBwOU4d4EeOHKnLL79c7dq105VXXqlp06Zp5cqVWrhw4QW1+/DDDysnJ8d+27dvn2MKBgA4XFm5Vb+eCPB9mP/u1Pq1sv37zN+aKcMwTK7mJJv+JxnlUnT7M+/7jprn5i6N+EgKS5Jy90tfj5bKy8yuCgBcilMH+D9r0qSJIiIilJqaKkmKjo5WZmblT/nLysp09OjRM86bl2zz6oOCgirdAADOae2+bOUWlSnEj+3jnF1yk3D5ebkrPbdImw86yfS0gqPSztm2++1HmlsLJN8Q6fovJa9Aae9SadE/za4IAFyKSwX4/fv368iRI4qJiZEkJScnKzs7W6tXr7afM3/+fFmtVnXv3t2sMgEADrTgxHDs3s0ayN3NYnI1OBsfT3f1ahohSZrrLKvRb/5espZKDVpJ0e3MrgaSbWX6y9+w3f/lZen3RebWAwAuxNQAn5eXp3Xr1mndunWSpLS0NK1bt0579+5VXl6eHnjgAS1fvly7d+/WvHnzdMUVV6hp06YaOHCgJKlVq1YaNGiQbrvtNv32229asmSJxo0bp5EjR7ICPQDUEQtPLGCX0pL5765gwIl58E6xH3zxcWnHDNv99tdKFj4AchptR0idR0sypO9ul/KdcOFDAHBCpgb4VatWqVOnTurUqZMk6b777lOnTp30xBNPyN3dXRs2bNDll1+u5s2ba8yYMerSpYt+/fVXeXv/sQLxF198oZYtW6p///4aMmSIevXqpQ8++MCstwQAcKCM3CJtOZQri8XWAw/n1/fEBy0b9ucoI7fI3GK2/SyVFUmhiVLcRec+H7Vr0ItSRAspL12aeqdktZpdEQA4PVP3ge/bt+9ZF7mZNWvWOdsICwvTlClTHFkWAMBJ/LF9XIjCA9g+zhVEBvqoQ3yI1u/L1vxtmbq+WyNzCikrlrb9ZLvfdgS9787Iy0/6v4+lD1Js6xSseFdKHmt2VQDg1FxqDjwAoH5ZVLH6PNvHuZQBLW2r0c8zcx78rnlSUY7kHyklXGJeHTi7qDbSoIm2+3OelA5tMLceAHByBHgAgFMqtxpanGqbF0uAdy0V+8EvTs1SUWl57RdgLbctXidJba6ybV8G53XRX6SWw2yLDU69UyorMbsiAHBaBHgAgFNavz9bOYWlCvLxUIeGwWaXgypoFROo2GAfFZVatXSXCYuT7V0mHT8keQdKTS+t/eujaiwWadjrkl+4lLFJ+uUlsysCAKdFgAcAOKWK+e+XNGsgD3d+XbkSi8Wifq1sw+jnbq3l1egNQ9r0P9v9lsMkT5/avT6qJ6CBNPQV2/1fX5EOrjO1HABwVvxFBABwSr/stAX43s0jTK4E1VExjH7+1syzLljrcBkbpSM7JXcvW4CH62hzle1mlJ8YSl9sdkUA4HRMXYUeAIDTyS4o0fp92ZKk3sx/d0nJTcLl6+mu9NwibT6Yq7ZxtTQNoqL3vemlkg9TL1zOkFek3YulzC3Sohel/k+YXZF0ZJdUfNzx7XoHSuFJjm8XQJ1GgAcAOJ3FqVmyGlLzqADFBPuaXQ6qwcfTXb2aRWjOlgzN25pZOwH+2G7pwGrJ4ia1vrLmrwfH8w+Xhr0mfXWTtPh1qeVQKa6LefUc2SW91bnm2h+/hhAPoEoI8AAAp/PLie3jejej992VDWgVaQvw2zJ0z4BmNX/BLT/YvjZKloJiav56qBmthkttr5E2fSv9eI90+0LJ/Tz+ZK2JnvKsHbavl9wvBcc7rt2cfba5/jXRsw+gTiPAAwCcimEY+mWHbeVyhs+7tpQT+8Fv2J+jjNwiRQXV4IJyxcel3xfa7tP77voGvyjtmmdb02D5O1LPu89+fk33lEc0l4Liaq59ADhPBHgAgFPZkZGn9NwieXu4qVtimNnl4AJEBvqoQ3yI1u/L1oJtmRrZrVHNXWzPEts+4hEtpMhWNXcd1A7/COmy56QfxkoLJ0qtr5BCG5/5/IqebEf3lEuSpy/hHYDTIMADAJxKxfD5Hk3C5ePpbnI1uFD9W0Zq/b5szd1awwF+92Lb19ZX1Nw1ULs63iit+6+0Z7E0/e/SDV/b9ow/m+B4Kbxp7dQHACZgGzkAgFP5Y/s4hs/XBf1P7Ae/OPWwikrLa+5CJXmSfwOpcc+auwZql8ViW9DO3UvaOVvaMtXsigDAdAR4AIDTKCwp14q0o5KkPuz/Xie0jglSbLCPikqtWrory/EXOHmP+ZZDJTdGbdQpDZpLve6z3Z/xoFSUY249AGAyAjwAwGksTzuikjKr4kJ8ldQgwOxy4AAWi0X9TvTCz9ua6fgLHFxj++ruJTUb5Pj2Yb5e99qGxedlSPOeMbsaADAVAR4A4DTs28c1j5DlXHNd4TL6t4ySJM3flinj5B5zR9j4je1rw26SNx/61EmePrah9JK08iPp0Hpz6wEAExHgAQBOYxH7v9dJyUnh8vV016GcIm0+mOu4hrN2SnuX2e4n9nFcu3A+ib2ltiMkGdL0BypPnQCAeoQADwBwCvuOFuj3w/lyd7Po4qbMf69LfDzd1auZ7d/UocPol7/7x/0APvSp8y59VvL0k/atkDZ8ZXY1AGAKAjwAwClUrD7fKT5Ewb6eJlcDRxtQMQ9+W4ZjGiw4Kq3/r2PagmsIjpN6P2C7P+cJqciBozkAwEWwDzwAwCn8Mf+dntS6KKWFLcBv2J+jzNwiRQb5XFiDqydLpQW2xc2OpF54gTi9I7uk4uOOb9c7UApPqvrrksdKaz+Xju6SFr0oDXze8bUBgBMjwA
MATFdabtXS1COSpD4E+DopMshHHRoGa/3+HM3flqmR3RpVv7HyUum3f9vut/s/aeFExxSJyo7skt7qXHPtj19T9RDv4S0NflH64hppxXtS59FSgxY1Ux8AOCECPADAdGv3Zut4cZlC/TzVNi7Y7HJQQ/q3itL6/Tmau/UCA/yWH6TjByX/SCmpHwG+QtaOmmnvkvul4HjHtZuzT/r1ler37De7VGoxRNo+XZrxD2nUVMfVBgBOjgAPADBdxfD5Xs0ayN2N7ePqqn4tI/XqnB1anHpYRaXl8vF0r3ojhiEtm2S73+022/7v9Z2nr+3rd7fVTPsRzaWguJppu7oGviClzpN+Xyht/VEKaWx2RQBQKwjwAADTVSxgx/D5uq1NbJBign10KKdIy3YdUUrLyKo3sm+FdHCN5O4tXfQXKWe/4wt1NUFx0lXvS6WFjm/b09f5wrskhSVKPe+RfvmXNOtRacSHZlcEALWCAA8AMNWRvGJtPJAjSerdjO3j6jKLxaJ+LSP1xYq9mrs1o3oBvqL3vcN1kn8EAb6CM4bsmtbrXttOBDn7pHVfmF0NANQKtpEDAJhqcWqWDENqGR144SuTw+kNaBUlSZq/LVOGYVTtxcd2S9um2e73uMuxhcH1ePn9sQr9+i/NrQUAagkBHgBgqkUn5r/3acHw+fogOSlcPp5uOpRTpM0Hq7iP94oPJMMqNUmRIlvVTIFwLa0ul5r0lcpLzK4EAGoFAR4AYBqr1dAvO7IkSX2aEeDrAx9Pd/Vqavu3nr8t8/xfWJQrrfnUdj95bA1UBpdksUiD/yVZTiyImLnF3HoAoIYR4AEAptmanqusvGL5erqrS0Ko2eWglgxoZZv7Pm9rxvm/aO3nUslx24roSf1rqDK4pAYtpHbX2O5v/l4qLzW3HgCoQQR4AIBpKnrfL04Kl7dHNbYUg0vqd2LxuvX7c5SZW3TuF1jLpRXv2e73uFNy488X/Ennm21f8w9LW6aaWgoA1CRWoQcAmGbRDtsQ6t5sH1evRAb5qH3DYG3Yn6P52zI1slujs79g289S9h7JN1RqP7J2ikTNy9rhuLZyD/xxf8NXtnUS/NnVAkDdQ4AHAJgir7hMq/cck0SAr48GtIrShv05mrMl49wBfvk7tq8X/cW28jhcm6ev7et3tzm+7bAk6eguafXHUu8HHN8+AJiMAA8AMMWyXUdUWm6oUZifEsIJZfXNwDbRenXODv26M0t5xWUK8D7DnyQH1kh7l0lunlLXGgh8qH1BcdJV70ulhY5t19PX1ua0e6W0RVLzQVJ0O8deAwBMRoAHAJiiYvh8n+YNZLFYTK4Gta15VIASwv20+0iBFm7P1LD2sac/saL3ve3VUlBM7RWImhUUV3NtNx8k7Zgh/fa+NOwNyY31NQDUHawCAwCodYZhaOH2E/u/M3y+XrJYLBrYNlqSNHNT+ulPyj1oW1VcknrcVUuVweV1GiV5B0rHdkvbp5tdDQA4FAEeAFDr0rLytf9Yobzc3ZScFG52OTDJoDa2AL9gW6aKSstPPeG3f0vWMqlxTym2Y+0WB9flEyR1HGW7v+5zqSjH3HoAwIEI8ACAWrdoh633vWtiqPzPNPcZdV6HhiGKCvJWfkm5lu7KqvxkSYFtITKJ3ndUXfOBUlgTqSRfWvOJ2dUAgMMQ4AEAta4iwDN8vn5zc7NoYJszDKNf/1+p8JgUmiC1GFz7xcG1ublL3f9mu79zjmO3rAMAExHgAQC1qqi0XMt/PyJJ6tM80uRqYLaKAD93a6bKyq22g1artPxd2/3ud7IIGaonsrVtP3gZ0or3JMNqdkUAcMEI8ACAWvVb2lEVlVoVHeSj5lEBZpcDk3VLDFOIn6eO5pdo5e5jtoOpc6UjOyXvIKnTjeYWCNfW5Vbb9nJZO6TUeWZXAwAXjAAPAKhVJw+fZ/s4eLq7aUCrKEnSrM0nhtEvn2T72nm0bTVxoLr8wqT219vur5ksleSZWg4AXCgCPACgVtkDfAvmv8OmYhj9rM3pMtI3Sb8vlCxuUrfbzS0MdUOr4VJwQ9tq9OummF0NAFwQAjwAoNbsP1ag1Mw8ubtZ1LNphNnlwElc0ixCfl7uOpRTpA3zTgSsVsOl0MbmFoa6wd1T6naH7f62abb94QHARRHgAQC15pcdtq3COsWHKNjX0+Rq4Cx8PN3V98SIjFnbTsyD7zHWxIpQ58R2khol2xayW/GeZBhmVwQA1UKABwDUmkU7MiWxfRxOZd9OrqyzjNguUnw3kytCndP1NsndW8rYJP0+3+xqAKBaCPAAgFpRWm7VktQT28cx/x1/kpIULE+V6XcjVqmtx0oscAhHC4iUOpxY0G7lR1LxcXPrAYBqIMADAGrFmj3HlFdcpjB/L7WNDTa7HDiZoNSp6um2SZI0q6itydWgzmpzpRTSSCrOlVZPNrsaAKgyD7MLAADUfWlZ+fp29X5JUvuGwdpyKPeC20zNZDuoOsMwpOXvapBbhBZaO2rm1kyNG9DC7KpQF7l5SD3ukmY+JO2cJTUdIEW2MrsqADhvBHgAQI1Ky8pXyssL7Y8Xbj+shdsPO6x9H08Gk7m8tF+kjE0a4B0lt3Jp04Fc7T9WoIahfmZXhrooqq3U9FIpdY60fJI07HVbsAcAF8D/rQAANSq/uKzS40eGtFKAt2N+/fh4uikm2NchbcFEy9+RJEV0vlwX7Q/Tb2lHNWtzhsb0SjS5MNRZXW6R9q2wbSm39UepzdVmVwQA54UADwCoNYkR/moXx/x3nCQrVdoxU5JF6nGnBm51swX4TekEeNQcn2Cpy63S0jekdV9IjXtKAVFmVwUA58S4QwBArenQMMTsEuBsVrxr+9p8kBSepIFtbCFq5Z6jOny82MTCUOc1HWAbTl9WLC2bxN7wAFwCAR4AUKPKrX/8Udwhnt53nKTgqLRuiu1+jzslSQ1D/dQuLliGIc3dmmFicajzLBYpeZzk5ikdXCP9vtDsigDgnAjwAIAaVbFavI+nm5pFBppcDZzKmk+k0gJbL2hib/vhil74WZvTzaoM9UVww5P2hv+3VJRjbj0AcA4EeABAjVqz95gkKalBgNzdLCZXA6dRXiqt+MB2v8ddtt7QEwa1jZYkLUnNUm5RqRnVoT5pe7UUmmDbG37lv82uBgDOigAPAKhRq/fYAnzzKHrfcZItP0jHD0r+kVK7ayo91TQyUE0a+Ku03NCCbZkmFYh6w81DSh4vWdxsw+j3rzK7IgA4IwI8AKDGZBeUaEfGcUlSs8gAk6uB0zAMaelbtvtd/yp5eJ9yyqA2tl54htGjVjRoIbUabru/fJJtagcAOCECPACgxixOzVLFGnYhfl7mFgPnsWeJdGid5OEjdR1z2lMqhtEv2HZYhSXltVgc6q2Oo2xbyeUfllZ9bHY1AHBaBHgAQI1ZtP2w2SXAGS192/a1w/WSf8RpT2kXF6y4EF8VlpZr0Q6G0aMWePpIF99tu79jhnRwrbn1AMBpEOABADXCMAwt2kGAx59kpdrCkSQljz3jaRaLRYNP9ML/vJFh9KglMR2kFkNt95e+K
ZXkm1sPAPwJAR4AUCO2pR9X5vFieXvwqwYnWT7J9rX5YCmi2VlPHdo+RpI0b2uGikoZRo9a0uUWKSD6xFD6j8yuBgAq4a8qAECNWHhi+Hy7uGCTK4HTyD8irZtiu3/xuHOe3jE+RHEhviooKdfC7QyjRy3x9JV6TZBkkXbOlvavNLsiALAjwAMAakTF9l9dE8JMrgROY9VHUlmRFNNRatzznKczjB6miWortbrcdn/pW1Jxnrn1AMAJBHgAgMPlFJRq9V7b/u9dE0JNrgZOobRI+u0D2/2Lx0sWy3m9jGH0ME3nUVJQnFR4VPrtfbOrAQBJkofZBQAA6p5FOw+r3GqoeVSAIoN8zC4H1ZSa6bhex9BtXyou/7AU1FBqfcV5v65iGP2B7EIt3J6pQW1jHFYTcFYePlLPCdLMB6XfF9hGjTTqYXZVAOo5AjwAwOEqhs+ntIw0uRJUh4+nbYDehK/WOahFQ7O93pDcpCPt/qJwd8/zfmXFMPoPF6fp543pBHjUrshWUpurpE3/k5a9LUW2lnyCzK4KQD1GgAcAOFS51bAvONavBQHeFcUE++rVazuoqNTqmPYO/6rmqw7ouOGrjKbXKryKrx/SPkYfLk6zD6P38XR3SF3Aeel4o7RvpZSzV1rxntTnH2ZXBKAeI8ADABxq3b5jOlZQqiAfD3VpHKpt6cfNLgnVEBPs67C2Wq39XJL0ZXmKkr2q3nvZqdIw+sMadGJhO6BWuHtJve6Vpt8v7f5FanyxlNDL7KoA1FMsYgcAcKj5J4bP927eQB7u/Jqp7/yOblFI+lJZLe6aXDawWm1UXo3+kCPLA85PRDOp3f/Z7i9/RyrMNrUcAPUXPfCAC0rLyld+cZnD2/X39lBihL/D20X9Mn+bbf/3fsx/h6TYrR9JkvZFX6oDaQ2q3Q7D6GG69iOlfSukY7ul5ZOkvo+c924KAOAoBHjAxaRl5Svl5YU11v6Cv/clxKPaDuUUauuhXFksUp/m1Q9rqBu8CtIVnvaTJGlrws1SmlHtthhGD9O5e0q97pN+vk/au8y2Mn1SP7OrAlDPEOABF1PR8z42paniQhw3R/VAdqEmLUitkZ591B8LTvS+d4wPUXiAt8nVwGzR2z6Rm1Gm3MiuOhrSVtLGard18mr00zceIsDDHGFNpA43SGs/tS1oF91O8ufDSgC1hwAPuKi4EF96yuF0Kua/s/o83ErzFbXzv5Kkg63/6pA2K4bRz2UYPczUdoS0f4V0eLu05HXp0mclC+t9AKgd/N8GAOAQRaXlWpKaJYn93yFFpn4jj5JcFQYm6FjD/g5ps2IYfUFJuRZuP+yQNoEqc3OXet4nuXtLh9ZL2342uyIA9QgBHgDgEMt/P6LC0nJFBXmrTWzVtwpDHWItU8y2jyVJh1r9xWG9kyevRj+d1ehhpuA46aJbbfdXT5Zy9ptaDoD6gwAPAHCIBRXD51tGysLKzPVa+J4Z8snbp1LvUB1OGuHQtoe0j5Ek+2r0gGlaDJFiOknlxdLiVyUrP48Aah4BHgBwwQzD0NyttgCfwvz3+s0wFLf5PUlSesubZfVw3GKbkm0YfWywj/JLyrVoB8PoYSKLm9TzHsnTX8raIW361uyKANQDBHgAwAXbln5cB7IL5e3hpkuasSJzfRZ86Ff5H9uqcndfpbcY5fD2LRaLhrSz9cL/vIFh9DCZf4TU/W+2++umSEd2mVsPgDqPAA8AuGBztmRIki5pFiFfL1YGr8/iNr0vScpoNlJl3qE1co1Kw+jLqr+3POAQTfpKjS+WjHJp8StSeYnZFQGowwjwAIALNnerLcBf2jrK5EpgpoCs9QrOWCarxUOHWo+psetUGka/p6jGrgOcF4tF6jFW8gmRsvdKaz83uyIAdRgBHgBwQdJzirRhf44sFqlfSwJ8fRZ7Yu57VuIVKvGPrbHrnDyMfnpqYY1dBzhvPsHSxeNt9zd/L2VsMrceAHUWAR4AcEEqet87xYeoQaC3ydXALD45vyts72xJ0sE2t9f49SqG0c/9vUhFhmeNXw84p/juUtNLJRnS4tek0gKzKwJQBxHgAQAXpGL++wCGz9drsVs+kEWGjjYcoMKQZjV+Pfsw+lJDi6wdavx6wHnpepvkHynlZUgrPzK7GgB1kKkB/pdfftHw4cMVGxsri8WiqVOnVnreMAw98cQTiomJka+vrwYMGKCdO3dWOufo0aO68cYbFRQUpJCQEI0ZM0Z5eXm1+C4AoP7KKy7Tsl1HJEmXEeDrLa+CdDX4/XtJ0oE2d5z95MPbpYPrLvhmObReQ5p4SJKmlfdw9FsCqsfLT+o1wXZ/5yxp/0pTywFQ93iYefH8/Hx16NBBf/nLX3T11Vef8vy//vUvvfnmm/rkk0+UmJioxx9/XAMHDtSWLVvk4+MjSbrxxht16NAhzZkzR6Wlpbr11lt1++23a8qUKbX9dgCg3vllx2GVlFuVEO6npAYBZpcDk8Rs/Vhu1lLlRnZVXmSX05+Ul2n7+t1tkttuh1x3mDVJH+pZzbV2VoGlVH4OaRW4QNHtpdZXSFt+kJa+KV0+SfIJMrsqAHWEqQF+8ODBGjx48GmfMwxDr7/+uh577DFdccUVkqRPP/1UUVFRmjp1qkaOHKmtW7dq5syZWrlypS666CJJ0ltvvaUhQ4bo5ZdfVmxszS2gAwCQ5m75Y/V5i8VicjUwg3txjqJ22D40P2vve1mx7Wunm6T4CIdcu4Mhxc8s1758H80/FqphYQ5pFrhwnUZLB9ZIOfukFe9KfR40uyIAdYSpAf5s0tLSlJ6ergEDBtiPBQcHq3v37lq2bJlGjhypZcuWKSQkxB7eJWnAgAFyc3PTihUrdNVVV5227eLiYhUXF9sf5+bm1twbAYA6qqzcqvnbbb2qA1oxfL6+it7xudzL8lUQ0lzZcSnnfkFAtBSe4JBrWyQNb1akd9aV6KfUUg1LYjE7OAkPb+mS+6Wf75d2/yo16iEl9jG7KgB1gNMuYpeeni5Jioqq/EdhVFSU/bn09HRFRkZWet7Dw0NhYWH2c05n4sSJCg4Ott/i4+MdXD0A1H2r9hxTdkGpQv081aVxqNnlwARuZUWK2TpZknSgzd9s+2HXsuFNbaF9wb4yHS8xav36wBmFN5XaX2e7v/xdKT/L3HoA1AlOG+Br0sMPP6ycnBz7bd++fWaXBAAup2L1+ZSWkfJwr5e/Tuq9Bru+lWfxERX5x+lIwlBTamgZ5qakEDeVlEtzdpeaUgNwRu2vlcKbSSV50vJJksGHTAAujNP+xRUdHS1JysjIqHQ8IyPD/lx0dLQyMzMrPV9WVqajR4/azzkdb29vBQUFVboBAM6fYRj2/d9Zfb6espYpdsuHkqRDrf8qw82c4esWi0XDTwyd/2lXmSk1AGfk5iH1utf2df9K6fcFZlcEwMU5bYBPTExUdHS05s2bZz+Wm5urFStWKDk5WZKUnJys7OxsrV692n7O/PnzZbVa1b1791qvGQDqi23px7XnSIG8Pdx0SbMGZpcDE4TvmSGfvL0q9Q5TZtNrTa1lWJJtSZ9f95fp
WJHV1FqAU4Q0kjrcYLv/2wdSwVFz6wHg0kwN8Hl5eVq3bp3WrVsnybZw3bp167R3715ZLBZNmDBBzz33nH788Udt3LhRo0ePVmxsrK688kpJUqtWrTRo0CDddttt+u2337RkyRKNGzdOI0eOZAV6AKhBMzbZ1hnp3byB/L2ddj1U1BTDUNzm9yVJ6S1Hy+rha2o5TUPd1TrcTWVWaWYavfBwQm2vlsKSTgylf4eh9ACqzdQAv2rVKnXq1EmdOnWSJN13333q1KmTnnjiCUnSP/7xD40fP1633367unbtqry8PM2cOdO+B7wkffHFF2rZsqX69++vIUOGqFevXvrggw9MeT8AUF/M3HRIkjSozZmnK6HuCj70q/yPbVG5h5/SW4wyuxxJfyxm91Mq8+DhhNw8pJ4TJIu7tG+5dHCt2RUBcFGmdpv07dtXxlk+gbRYLHrmmWf0zDPPnPGcsLAwTZkypSbKAwCcxq7DedqRkScPNwvbx9VHhqGGG9+WJGU0vU5l3s6xA8GwJE+9uKJYyw6WKzPfqkh/p50liPoqLNG2qN36/0qb/md2NQBcFOMeAcBFpWXlK7/Y8cOF/b09lBjhf8bnZ54YPn9x0wgF+7Hvdn0TlLFCQZmrZHXz0qE2t5ldjl18oJs6RbprbWa5pqeV6Za2XmaXBJyq3bXS3mXSsd1mVwLARRHgAcAFpWXlK+XlhTXW/oK/9z1jiJ9xYvj84LYMn6+PGm58S5KU2fT/VOLnXD8Dw5t6aG1muX5KLSXAwzm5e9qG0k+7V5Ih7V0uxXY0uSgAroQADwAuqKLnfWxKU8WFOG4BsQPZhZq0IPWMPfv7jhZo04FcuVnYPq4+CshcreD0ZbJaPHSgzd/MLscuLadc+SVSUrCbLJJWZ5Rr/p5SRfpd2DB6fy8pMdjdMUUCFcKbSk36SL8vlBa/JnUeJXmdedQTAJyMAA8ALiwuxPesw90dbdZm2/D5bolhCg/wrrXrwjlUzH0/nHS1SgLiTK7GJi2nXClf5p9y/C8zCx3S/oKR/oR4OF7zwbYAn5cuLXhBGvi82RUBcBEEeADAeavYPm5w2xiTK0Ft889ar9CDi2RY3HWg7Z3Vbif1uLt0uNxhdaVm2/Z9H9vJS3EBblpxqEw/pJYpLsCisZ2q/yHTgTyrJq0tUX6JoyoFTuJx0s/m8ndti9vFdDCvHgAugwAPADgvGblFWr3nmCRpINvH1TsNN06SJB1OvELFgY2r/HofT4skacJvwdJvp/aYX6ikEDfFBLgp3NdTP+0q04E8Qz4eUkwAq9HDiTXpa+uJ/+ke6a/zJDdGewA4OwI8AOC8VAyf79QoRNHBPiZXg9rkd3SrwvbPlSGLDrS9q1ptxAS461WPSSpqN0oKiHRofScH9SBvi9pGuGnDYauWHSzX1c0J8HBiF98tHVhr2xf+tw+kHtUf3QKgfiDAAwDOy4yNFcPn6X2vbypWnj+SMFRFwU2q3U6M5agUWC4F12yovjjOQxsOl2jZwTJd3ZytDuHE/MKlAU9KP98nzX9OajVcCm5odlUAnBgfSwMAzulIXrFWpB2RxPz3+sbv6BaF750pQxbtbzvW7HLOy0XR7vJwk/YfN7Qv12p2OcDZdblViu8uleRJ0x+QDMPsigA4MQI8AOCcZm5Ol9WQ2sYFKT7Mz+xyUIvi178hydb7XhjawuRqzo+/p0UdGtjmEi89ePotEQGn4eYmDXtdcvOQtk+Xtv5kdkUAnBgBHgBwTj+tPyhJGt4+1uRKUJv8j2xU2P45Mixu2tf+HrPLqZLkOFuAX3agXAY9mnB2Ua2lnif+G5vxD6kox9x6ADgtAjwA4KzSc4q0Iu2oJGloe4bP1yfx61+TJGUlXqGi4CSTq6maLlHu8nKTMgoMpeUQ4OECej8ghTWRjh+S5j1rdjUAnBQBHgBwVj9vPCTDkLo0DlXDUIbP1xcBh9cq9MBCGRZ37W833uxyqszHw6LO0Sd64RlGD1fg6SsNs31oppUfSvtWmlsPAKdEgAcAnNUfw+fpfa9PKnrfDze5WkVBCeYWU00Xx/4xjN7KMHq4giZ9pfYjJRm2veHLS82uCICTIcADAM5o39ECrduXLTeLNIQAX28EZvymkEOLZbV4aH971+t9r9Ah0l2+HtKRIkM7j7EaPVzEwOcl3zApc7O04n2zqwHgZNgHHgBwRj+e6H1PTgpXZKCPydWgVhiG4te/LknKbDhQxcVFUnHqhbebs+/C26giL3eLLop216/7y7X0QLlahLnXeg1AlflHSJc+Lf04Xlr4T6ntCCmID1AB2BDgAQBnxOrz9U/wocUKzlguq2HRgd3bpd0OXn3ew9ux7Z1DcqyHft1fruUHyzS6jafc3Sy1en2gWjreJK3+RDqwSprzuDTiQ7MrAuAkCPAAgNPamXFc29KPy8PNokFto80uB7XBsKrR2n9JktKNUJW0v1Hyj3Rc+x7ett7FWtSugZsCPKXcEmnrEavaNqAXHi7AzU0a+rL0QYq08Rup881S4iVmVwXACRDgAQCnVdH73rt5A4X4eZlcDWpD+O5pCji6WWUefjpQHG4L78FxZpd1QTzcLOoW4675e8u19GAZAR6uI7aTdNFfpFUfSdMfkP72q+TuaXZVAEzGInYAgFMYhqGfNhySJF3egeHz9YGlvESN1r0qSTrY5FqV1aHP+JPjbO9l5aFylVlZjR4upN9jtgXtDm9lQTsAkuiBBwCcxsLth5WWlS8vdzfFhvho04GcareVmpnnwMpQUyJTv5RP3l6V+DbQoYSrpK0rzC7JYVqHuynYW8opljYetqpTFL3wcBF+YSxoB6ASAjwAwM7H0zYw65U5OyRJJeVWXfv+coe2DefjVpqvhhvekiTtbzdeVg9fkytyLDeLRT1iPTQrrUzLDpYR4OFaWNAOwEkI8AAAu5hgX710TXs9/dMW5RWXaVSPxmoVE3TB7fp4uikmuG6FwrokdutH8io6osLAxspsdp10bI/ZJTncxbHumpVWplXp5SopN+Tlzmr0cBF/XtCuyy1SQi+zqwJgEgI8AKCSzOPFyisuU6CPhy5rHSUPd3rO6zKPoiOK2WLr0dvX8e8y3OrmIllNQ90U4WtRVqGhdZnl6hbDn0BwIbGdpItulVb9R/r57yxoB9Rj/FUGAKjklx2HJUk9kyII7/VA/PrX5FGap7ywtjrSeLDZ5dQY2zB629D5pQfKTa4GqIZ+j7OgHQACPADgD3nFZVqz95gk2/ZxqNt8j21X1M4vJUm7L3pMstTtPwuSY2297mszylVYxmr0cDF+YdKAp2z3F/5Tyj1kajkAzFG3f1MDAKpk+e9HVFpuKD7UVwnhfmaXg5pkGEpY/bwshlVHGg3S8ahuZldU4xKDLYr2t6jEKq1OpxceLqjTKCmui1Ry3LagHYB6hwAPALCrGD7fu3kDWSws8lWXhRxYoJBDi2V189Kezg+ZXU6tsFgsuvjEMPolB8pMrgaoBjc3aegrkiy2Be12Lza7IgC1jAAPAJAkHcou1M7MPFksUs+mEWaXgxpksZYqYfULkqR
DrW5RcWAjkyuqPT0b2obRbzhsVW4xw+jhgioWtJNsC9qVl5pbD4BaRYAHAEiSftmZJUlqHxesUD8vk6tBTYra/oV8c39XiU+4DrQda3Y5tSo2wE2JwW6yGtLyg/TCw0WdvKDdSvaFB+oTAjwAQFbD0OLUP4bPo+7yKM5W/IY3JEn7Otyncq9AkyuqfT3jKobRMw8eLsovTOr/hO3+ghekvExz6wFQawjwAABtPZSrrLwS+Xm566LGYWaXgxoUv+5VeZTkKD+0pTKbXmt2OaZIjnOXRdKOY1ZlFljNLgeons6jpZiOUnGuNPcps6sBUEsI8AAA++J1PZqEy8uDXw11lf+RjYra8YUkafdFj0tu7iZXZI4wHze1ibD9nLMnPFyWm7s05GXb/XVfSPtWmlsPgFrBX2kAUM/lF5dp+e9HJUl9GD5fdxlWJf72pCwylJUwXLnRyWZXZKqL42yL2S3eXybDYDE7uKj4rlLHm2z3p/9dsvKBFFDXEeABoJ77dWeWSsqtig/1VbPIALPLQQ2JTP1agVnrVOYZoN1dHjW7HNN1j3GXp5t0IM/QnlwCPFzYgCcl72Dp0DppzadmVwOghnmYXQAAwDyGYWj+tgxJUv9WUez97opyD0ilhWc9xaMkV41WT5Qk7Wt6k0oLc6XC3DO/IGefIyt0Sn6eFnWKctdvh8q15ECZEoLZeQEuKiBSSnlEmvmgNO9pqfUVtkXuANRJBHgAqMd2ZuZp37FCebm7qRd7v7ue3APSd7ef87RGlkPydDuufMNb6ZsWSpsWnV/7Ht4XVp+T6xlnC/BLD5Tr+laG3PgAC66q61+lNZ9ImVuk+c9Jw141uyIANYQADwD12Nyttt735KRw+XvzK8HlVPS8t79W8o887SkBeWmK3PKKJCmt9V1SYNPza9vDW/Kv2x/qdIx0l5+HdLTI0LYjVrWOqJ+L+qEOcPeQhrwkTR4qrfqPbYX62I5mVwWgBvDXGgDUU3nFZVr++xFJUv+Wpw9/cBH+kVJw3KnHreVK3PqKLDKUGZOi4w371H5tTszL3aLuse5asNc2jJ4AD5eW0EtqO0La9D9pxj+kv8ySGFUC1DksYgcA9dTinYdVWm6oUZifmrJ4XZ0Uu/cHBRz/XWUeAdrT/Gazy3FKFavRrzhUrtJyFrODi7v0WcnTX9q3QtrwldnVAKgBBHgAqIcMw9DcrZmSbL3vLF5X9/gUHFL8ri8lSbtb/EVlXiHmFuSkWoe7KdTHovxSaf1htuCCiwuOk/o8YLs/+3Gp6CyLVQJwSQR4AKiHdmTk6UB2obw93NSrWd2e51wvGYaabH1HbtYSZYd10OGYFLMrclpuFosujrUNnV+8nwCP/2/vvsOjqvI/jr/v9PTeCQECoXekKU1xURS7orCKrIoFdRX5ubq6a1n7KmLDLqiLYgMbiAhSBaR3SEhID6mQnun398dAFEEgMMnNJN/X89wnkzszdz7hkMx87zn3nBZg8F0Q0RFqimHl81qnEUJ4mRTwQgjRCi07Onldhwj8TTIdSksTVfAzIYd24tKZOND1TrkO9hTOPTKMfkuRi1qHDKMXPs5ghouPFO7r34TivdrmEUJ4lRTwQgjRylTWOVifeWTyuq4yeV1LY7Qdpl3abABykydg84/VOFHz1y5EIT5QweGGjYXSCy9agI6joculoLo8E9qpcmJKiJZCCnghhGhllu4twuFSSY4KIDlKJq9radqlvovBWU11UAcOth2ndRyfoChKfS/8L3lOjdMI4SVjngaDBTJXwZ6vtU4jhPASKeCFEKIVcbrc/LTHM3z+4h5xMnldCxNRuJrIorWoio6MbneDTpZFO13nJnj+rXaVuim3Sm+laAHC2sF593tu//gI2Gs0jSOE8A4p4IUQohVZd6CM8joHYf5GBrUP1zqO8CKTtYQOe98CIK/9tdQGd9A4kW+JCdDRKUyHCqwrkF540UKc+3cIbQuV+bD6Ja3TCCG8QAp4IYRoJVRV5YddhQD8pVssBr28BbQYqpvk3a9hcNZQFdyJ/PbXap3IJx3thf8lX66DFy2E0Q/GPOu5vfY1KMvQNo8Q4qzJpzchhGglUouqyCytwahXOF8mr2tRYotWEHpoBy6dmfQe96HqZGWBMzE43oBOgYxyN6V1bq3jCOEdXS6B5AvAZYfFD2udRghxluQdXgghWomjve/DOkURbDFqnEZ4ix9WknK/ASA7ZTLWgASNE/muELNCz0gd20vcbC+WAl40gdI07x7PHAQRycfuUxTPsnKzhsD+HyH1B+h8sXdfVwjRZKSAF0KIVqCkysrGrEMAXNRdlhVrKRSXnU66AnSqk8ORAyhqM0brSD7v3DYGtpfY2VYsw+hFIzL6eb7Ov837x75ny/FFfGQnGDIVfpkJix6E9iPA5O/91xZCNDop4IUQohX4cXcRqgo9E0JIDJcPbS1F4v6PCVBsOAyBZHSb6ulpE2flnFg9Zj2UyUz0ojEFJ8CVb4OjznvHrMj1TFRnqzrx/SMehF1fQUUOrHoBRj/uvdcWQjQZKeCFEKKFq7O7WJ5aDMDFPaT3vaUIKtpA/IHPAchoPwGHOUzjRC2DxaAwME7P6jzpgReNLLiJL3cxBcDFL8C8GzwT2vW6HqK7NG0GIcRZk0nshBCihVu6t4hau4u4EAu9E0O1jiO8QG+vouMvD6CgUuwO4XBYb60jtSjD2/zWv+FwSU+8aEG6jIXOY8HthIXTQJX/30L4GinghRDHcbjcFFZY2ZVfwfbccooqrbjc8ibvi+xONwt3HgTgst7x6GSIdYvQbuOTWGrysfrFkqnGaB2nxekWqSPE5Lm9oVB64kULc/HzYPSH7F9g+6dapxFCNJAMoReilXOrKgdKauqHWN/w7nqqrM7jHqfXKUQFmmkT5sfQjpGM6hxFrzah6HVSEDZny1OLqahzEBlo4rxOkVrHEV4Qnv0D0Qe+QlV0pPf+B+5f3tE6UoujUxT6ROtZmefi5xwnt/cxax1JCO8Jbeu5Hn7p47DkUUi5CPzDtU4lhDhNUsAL0Upll9WweFchW3LLqaxz1O8/WrzrdQrhASb0ikJJtQ2XW6Ww0kphpZVN2Yd5ddl+wgNMjEiJ4pr+bRiaHIEivbvNitPl5rvtBQCM6x2PQSeDrnydsbaYDr8+CkB+9zuoCu+ucaKW62gBv6nQRVmdmwg/+f0RLciQu2H7Z1CyF5Y9AeNe0TqREOI0SQEvRCuTc6iWr7bksSHzUP0+P6Oe5KgAdhVU8saEvgxJjiTUz4juSO+6y61SWm3jYIWVtKIqVqaWsCqthEM1dhZszWfB1ny6xQVz2/D2XNorHqNePug2B6v3l1JWYyfUz8jIlGit44izpaokr/sHRtthqsN7kNfrXijP0TpVixUT4Pk75lLh23QHk3tKL7xoQfRGuHQGzL4YNs+BPhMhcaDWqYQQp0EKeCFaiaJKK59uyOHXI4W7AgzuEMH5XaLpEhtE7uE6/rlgJ0kRAYQHmI55rl6nEBNsISbYQp/EUK4bkIjD5WZL9mG+33GQLzfnsedgJfd/tp0XFq
dyx4hkJg5qi0EKec243CrfHul9v6RXHCaDtIWvi0n7H2EFK3HrzaSfNwNVbzr1k4RXzE+TAl60QElDPYX7trnw/TSYsgL0UhoI0dzJb6kQLZyqqqxILeHDdVnYnG4ABrUP5+p+bc5qPXCjXsegDhEM6hDBtAtTmPtrNnPWZnOwwspj3+7m0w05PH5ZdwZ3iPDWjyIaYP2BMgorrQSaDYzuKpOc+TpLxQGSNj8LQHa/h6gL6ahxotZDr8DOUjdph1ykhOu1jiOEd134JOxbCEU7YcPbMGSq1omEEKcgXTJCtGBVVgcvL03jndUHsDnddI0L4vmre3Hf6JSzKt7/KCzAxN3nd2LNP0bxn8u7E+pvZF9hFde/s557P91KUaXVa68lTs2tqny9LR/wrPtuMUrR4csUt4NOv9yP3mWlPG4YhZ1v1DpSq3JOrOf3Z/5+xykeKYQPCoj0FPEAy5+Binxt8wghTkkKeCFaqF35FTz41Q42Zh1Gr1O4YWBbHh3bjbZeLNz/yGLUc+OQdix/YCQTB7VFUeDb7QVc8NJK5m/JQ5X1ZpvEpqzD5B2uw8+oZ0z3WK3jiLPUZsfrBJbtxGkKIWPo86DIW3dTOr+tZ7DigjSHLKcpWqa+N0KbgWCvhsUPaZ1GCHEK8ilAiBZoRWoxz/6wl/JaB/GhFv5zeQ/PGuBNtORbWICJp6/syXd3n0fftqFU25xM+3w7987bRkWd9GI1Jpdb5fNNuQCM6R5LgFmulPJlgSVbSNj1BgAZg5/G7i8nZJraObF6Qs0KRbUqq/NkTXjRAul0cOnLoOhh77eQtkTrREKIk5ACXogWRFVVvt6az9urDuBW4byOkTxzZU/aRwZokqdHQghf3jGUBy5MQa9T+G57AWNfWc2vB8o0ydMarEorIb+8jkCzgXG947SOI86CzlFDp18eQFHdlLS/gkNJY7WO1CoZ9QpXdDIC8EWqXeM0QjSS2B4w+E7P7UXTwV6jbR4hxJ+SAl6IFsLtVpmzNovPjvS+XtY7nrtGJmM2aHv9s16ncM8FnfjyjiEkRfiTX17H9e+u543l6TKk3stsThdfbskD4Mq+CfibpPfdl7Xb9DSWqmxsAfFkDnxc6zit2rWdPQX8kiwnh+rcGqcRopGMfBiC20B5Nqx4Vus0Qog/IQW8EC2A0+3m1Z/3s2RPEQowaUgSNwxsi6I0zZD509G3bRgL7x3G1f3aoKrw3x9TmfrJFmpsTq2jtRg/7irkUI2dyECTzDzv48JylxKTPg8VhfRzX8JlCtY6UqvWPVJP90gdDjd8ky6XAYkWyhwIl7zkub3uDSjYqm0eIcQJSQEvhI9zu1Vmrcjg18xDGHQK95zfiYt6NM+h04FmAy9d15tnruyJUa+waGchV81aS3aZDNU7W9VWJ98cWff92v6Jsu67DzPWlZC87mEADna7lcqYQRonEgDXdTYB8Nk+h4weEi1X54ugx9WguuHbe8AlJ6yEaG7kE54QPkxVVT74JZN1GWXoFYVpF6YwJLn5r7s+YVBb5k0ZTFSQmdSiKi57/RfW7C/VOpZP+2Z7PrV2F4nh/pzXMVLrOOJMqSod1v8To62MmrAu5PSZpnUiccTlHY2YdLDvkJvdpTKMXrRgFz0PfmFQuNPTEy+EaFakgBfCh83bmMuyfcUowNRRyfRtG6Z1pNPWPymc7+/xzFJfUefg5tkb+GpzntaxfFJptY0fdxcCcMM5iU222oDwvuj0zwjPW4ZbZyL93BmoerPWkcQRoRaFv7T3zCvxuUxmJ1qywCj4y9Oe2yuehbIMbfMIIY4hBbwQPmplWgnfHhkyfcuw9gxJ9r1e15hgC/OmDObyPvE43SoPfLGd15btl+GpDfT5xlwcLpWucUH0SQzVOo74M5X5UJb+p5slZyXtNj4JQE7KzdS6DSd9PGXpUJGr8Q/VuhwdRv/1fgdWp/ydEi1YnwnQfgQ4rfDd30Hel4VoNmSKYiF81NEe1wkD23JBF9+dsMxs0PPydX2ID/XjzRUZvPRTGvnldTx1RQ8MejnHeCqZpTWsTi9FASYMTGpWExeK36nMh/lTTvIAlY66bPSKlQrVn4O7V8Hu1ad/fIP01DeFcxP0xAcqFFSrLMlycllHo9aRhGgcigLjZsKsoZC1GrZ8CP1v1jqVEAIp4IXwOXsPVtbfvqx3PON6x2uYxjt0OoV/XNSF+BALj327m3kbcymqtPL6hH4EmOXP1Ml8sy0fgPO7RNMxOlDjNOJPOeo8X3tdBwHRx92dkP8DQfn7cOr9SO/xMJjDT//YBjME+N4IHF+k1ylck2Lk1S12vki1SwEvWrbwDnD+o7DkEfjxUeg4GkLaaJ1KiFZPureE8CEF5XU8vWgvAN3ighl/TqLGibzrxiHteOuv/bEYdSxPLeH6d9ZTXGXVOlazVlxlI8hi4Ppz2modRZyOgGgISThmC6CWxIJFAGR2vQN7dM/jHnPSTYr3JnXNkWH0a/Jc5FXJZHaihRt8J7QZCPYqGUovRDMhXVtC+Ig6u4spH2+ivNazpMu1A9qga4Th0unF1V49XoDZQPvIgNN+/F+6x/LpbYO55cNN7Myv4KpZa5kzeaD0Lv/B709sTBzUlkCL/Dn3RTqXjU67ZqKobkpjzqM0drjWkcQptA3WMSRez7oCF5/vszPtHIvWkYRoPDo9XP4GvHUepC+FbXOh71+1TiVEqyaf+ITwAaqq8n9fbmdXfiXBFgOVVidmg96rr2Exegbk3PfZNq8eF2D59JENKuL7tg1j/p1DmTR7A9lltVz95lremzSAc9o1YFhxC/fuqkwA2kX4M7xTlMZpxJlKSpuDX20+NnMEmV1v91x3Kpq9CV1NrCuo47N9Du7tb8YgKz+IliwqBc5/BH76Nyz+JySfD8G+f/meEL5KCnghfMCsFRl8v+MgBp3CP8d25aH5O73+GnEhfsy4rjdWh/eGhOaX1/HG8nS255ZTY3M2+PnPXNmTJ7/bQ2pRFRPeXc/0v3Tm3CNrnDe0Z78l+XlfEesOlAFwWZ8EmbjOR4WWbiY27wcAMrrfi9MYpHEicbrGtDcQYVEoqlVZlu1kTHu5Fl60cEPuhj3fQP5m+O4+mPCZnHAUQiNSwAvRzK0/UMZLS1IBePLyHvRICGm014oL8fPq8bzZq+9wqTz7w75j9jW0Z78lqLI6+NfXu+u/jw2W4bu+yGCvJHn36wAcbHspFRG9NU4kGsKkV7ims5G3t9v5ZK9dCnjR8un0cPkseHsY7P8Rtn/qWWpOCNHkpIAXohkrrbZx76dbcatwdb82TBjUll35FVrHOm3e6tV3uVW+217AhqxDAPRrG8qWnDPr1fd1j327m/zyOmKCzRRV2rSOI86EqtJh7yxM9sPUBiSS0/FGrROJMzChq4m3t9tZlesit9JNYrDMCyxauOguMPJhWPYE/PAPaDcMQlvWZLpC+AIp4IVoptxulfs/20ZxlY2O0YH854ruWkc6I97q1b9vdCcWbM3ni815bMkpB8Dhal0zQH+3vYD5W/LRKfDAhZ158KsdWkcSZyDq4HIiitfjV
gzs73E/br2s4e6LkkJ0DGujZ3Wei0/32nlwkIyGEa3A0Hsh9QfI2wBf3wk3fQs6OXklRFOS3zghmqlZK9JZvb8Ui1HHrIn98De17vNtiqJwVb823D68A0fni3ry+z1Ut5Je+PzyOh5Z4Jn74O5RHekWH6xxInEmzLZS2u17F4Dc5BuoDe6gcSJxNiZ09Swp93mqA4dLltcSrYDeAFe+BUZ/yFoNv76ldSIhWp3WXREI0UytP1DGjJ/SAPjP5T1IiZHJrY4a2Tkaq8PNh+uy2JpTzvi31zF78jlEB7Xc3i+XW2XaZ9uotDrpnRjKPRd0IrWwSutYosFUOmZ8hMFVR2VoVwraXaF1IHGWRicZiPJXKKlV+SnbydgOci28aAZK07x/THMQRCR7bkckw1+egoXTYOnjnlnpo7t4/zWFECckBbwQzczhGjt/n+e57v2a/m24doBcX/ZHnWM9JzRC/IzsLqjkqllr+fBvA0mOaplrxb+z6gC/Zh7C36TnlfF9MOpl8JQvilfKCK4uwan3I73HfaB4dylI0fSMeoXxnY28vtXOJ3vsUsALbRmPXLI2/7bGOf49W34r4gf8DVIXedaGX3A73LoU9PL/X4imIAW8EM2Iqqo8PH8nRZU2OkQF8OTlvnnde1N58dpePLVwL9lltVxzZK34/kkta634DZmH6lcheHxcd9q1sln3NVOZD446rx0uIH81iUoJAFldbsXmF+O1YwttXd/VxBtb7azJd5FV4aZdiJxgExoJToAr3/bq3y4AKnJh9Utg+93IL0WBy16HWYPh4DZY+Tyc/6h3X1cIcULNuoB//PHHeeKJJ47Z17lzZ/bt8ywlZbVaeeCBB5g3bx42m40xY8Ywa9YsYmLkg5HwTV9szmPx7kKMeoVXr+/b6q97P5W4ED++unMof5uzkR15Fdzwzq88d3VPrurXRutoXpFdVsPtH2/C6Va5tFcc1w5oGT9Xs1eZD/OneO1wOtx00mWiU6Asoj8lced77dhCe22CdIxINLAi18ncPXYeGdJyL+cRPiA4oQlfKw4unQFf/s1T4CefD0lDm+71hWilmn110L17d5YuXVr/vcHwW+T777+fhQsX8sUXXxASEsLdd9/NVVddxS+//KJFVCHOSnZZDU9861nfe9qFnRt1vfeWJDLQzLwpg7lv3jaW7Cli2ufbSSuq5v/GdEZ/dLY7H1RR5+BvczZyuNZBrzYh/Pea3iiK7/48PuVo71Wv6yAg+qwPl5T1KX7FqdhNoRzo8XdPz5VoUW7qbmRFrpPP9tm5f4AZf6O0sWglelwN+5fC9k/gq9vgzjXgF6Z1KiFatGZfwBsMBmJjY4/bX1FRwfvvv88nn3zC+ed7ejNmz55N165dWb9+PYMHD27qqEKcMafLzX2fbaPG7mJg+3CmDJeZqRvC32Tgrb/256WfUnljeQZvrcwgvbiKmdf3JdDc7P/MHcfhcnPX3M1klNQQF2LhvZsG4GeS66WbXEA0hJxdb1ZYyQZii9cAkN7jfpwmWT2gJRrZ1kBSsEJ2pcqC/Q4mdjNpHUmIpjP2BchdD4cOwLf3wnUfyYlKIRpRs79Qa//+/cTHx9OhQwcmTpxITk4OAJs3b8bhcDB69Oj6x3bp0oW2bduybt26kx7TZrNRWVl5zCaEll77OZ2tOeUEWQy8PL6PT/cca0WnU/i/MV2YOb4PJoOOpXuLufKNX0gv9q3Z2lVV5d/f7OaX9DL8TXremzSA6GAZkuuLjLZykne/AUBB28uoiOitcSLRWHSKwqQenqJ9zi47qipLyolWxBwEV78POiPs/Ra2fKh1IiFatGZdwA8aNIg5c+awePFi3nzzTTIzMxk2bBhVVVUUFhZiMpkIDQ095jkxMTEUFhae9LjPPvssISEh9VtioszyLbSzLbec15enA/DUFT1ICPXTOJFvu6JvAp9NGUx0kJn9xdVc9vovfLMtX+tYp0VVVWb8lManG3JQFHj1+r50j5dLKXySqpK85zWMjgpqAtuR0+lGrROJRnZNiokAI+w/7GZtvkvrOEI0rYR+cMG/PLd/eAhKUrXNI0QL1qwL+Isvvphrr72WXr16MWbMGBYtWkR5eTmff/75WR334YcfpqKion7Lzc31UmIhGsbqcPHA59twuVUu6x3P5X2acPKZFqxv2zAW3juMockR1Npd/H3eNv65YCdWR/P9UK2qKk8t3MtrP3tO5vzrkm6M7iYTcvqqmLwfCCvdjFtnZH/Paag6WV6ppQs2K1yT4mnn2bvsGqcRQgND7oEOo8BZ55nYztuz4QshgGZewP9RaGgoKSkppKenExsbi91up7y8/JjHFBUVnfCa+d8zm80EBwcfswmhhRd/TCWjpIaoILMsGedlUUFmPr5lEPee3xFFgU9+zeGqWWvZV9j8LplxuT3LB76/JhOAJy7rzt/Oa69xKnGm/KpzaZc2B4DsTpOoC2yrbSDRZG46Mox+WbaT7Aq3xmmEaGI6HVz5FvhHQtEu+OFBrRMJ0SL5VAFfXV1NRkYGcXFx9O/fH6PRyLJly+rvT01NJScnhyFDhmiYUojTsyHzEO//4inYnr+6J6H+MumRt+l1CtP+0pk5kwcSHmBiz8FKxr22hteW7cfhah4frh0uN/d/to15G3PRKfDCNb2YNLSd1rHEGVLcDjruehmd2055RB8KE8dqHUk0oeRQPSMS9ajAR7ulF160QkGxcPV7gAJbPoJtn2idSIgWp1kX8NOnT2flypVkZWWxdu1arrzySvR6PTfccAMhISHccsstTJs2jeXLl7N582YmT57MkCFDZAZ60ezV2JxM/2I7qgrX9m/D+V1kqHRjGpESxeK/D2N01xgcLpWXfkrjijd+Ye9BbXvjiyqtTPpgA99uL8CgU3j1hr5cN0Dm5PBliRmfEFh1AIcxiPTu94LSrN9mRSO4+Ugv/OepdmocMpmdaIWSR8HIhz23v58GRbu1zSNEC9OsP1nk5eVxww030LlzZ6677joiIiJYv349UVFRALz88stceumlXH311QwfPpzY2Fjmz5+vcWohTu25H/aRc6iW+BAL/xrXTes4rUJ0sIV3b+rPK9f3IdTfyO4CT2/8E9/t5nBN0/eULd1TxEUzV7E2oww/o563b+zPpb3imzyH8J7gQzuJz/oagIxuU3GYw7UNJDQxItFA+xAdVXaYn+bQOo4Q2hj+f5B8vud6+M8ngc23VoQRojlr1gX8vHnzKCgowGazkZeXx7x580hOTq6/32Kx8MYbb3Do0CFqamqYP3/+Ka9/F0Jrv6SX8vH6bABeuKY3wRaZ3KqpKIrC5X0SWHL/cC7qHovTrTL7lyyG/3c5b63MaJJJ7qwOF//+Zhe3frSJw7UOuscH8/2953FBVxmF4csM9ko67pqJgkpRwoUcjpaRYK3V75eU+2CnHZdbeuFFK6TTwVXvQnAClO2Hb+8BWV5RCK9o1gW8EC1NldXBg1/uAOCvg9tyXqdIjRO1TtFBFt66sT8f3zKQrnHBVFmdPPfDPi54aSUfrcui2ub0+ms6XW6+2pzHRTNX8dE6
[base64-encoded PNG payload omitted: embedded matplotlib figure outputs ("image/png" display_data cells) added to BERT_WATER_MARKING.ipynb by this patch]
pK4ic/+clRj1u9ejUAqampnZBKRES6M/eidAOSI4kM0T7krZG4wzXaXpJ2CnWhiS0cLV2ZYRhM6KE+91ZLHQHB0VBTBvlrrE4jIp3MLwp3p9PJ3/72N6688koCAn6Y3b99+3YeffRRVqxYQW5uLv/973+54oorOOWUUxg+fLiFiUVEpDvQNHkvOR0k7vgYgII+51ubRXyC9nP3gs0OmRNc19XnLtLt+EWP+5w5c9i1axc///nPG90fFBTEnDlzeOGFF6ioqCAjI4Pzzz+f++67z6KkIiLSXVTW1rNhn2tXkjE9Vbi3RvSO/xFcuY/6wEiKQzPh4JHb2lqldHe75BLrnJju2gpwU5GTA5VOEsP8YkzJOlmnwObPYMdXMOE2q9OISCfyi8L99NNPxzTNJvdnZGTw1VdfWZBIRES6u7V7SnE4TdKiQ0iNCbU6ju8ryyNx8UNgg8IaO+bnd7bfuQOC2+9c0qniQ20MSbCxvtDJkjwH5/RT4X5UvSe6Lnd9A/U1+t0X6Ub8onAXERHxNe5p8qM1Tb5V7JWFxBmHADgw9FqIyGqfEwcEQ3hC+5xLLDEhPYD1hbUsyqvnnH5aK+KoEgdCeCJUHIA9yyHzJKsTiUgnUeEuIiLiJYfTZNVu9bd7Iz7/a+yGSWVIMuVpJ2kbOPE4KT2AV9fUsmhPPaZpYuh348gMwzVdft2/XH3uKtxFug3NRxIREfHS5v2HqKhxEBEcQP+kSKvj+IXEPbMBOJBwgop2aWRsqp0gO+yrMNlR6rQ6ju9zbwuXo3ZRke5EhbuIiIiXVjZMkx/VMwabTUVoS0JLthBVvA7ThAMJY62OIz4mJMDguGTXInWL9mhbuBZlNfS57/kOaiuszSIinUaFu4iIiBdM09Q2cF5K3vo+AMVEUBcUY20Y8UknNeznvihP28K1KDYTonuCsx52LbU6jYh0EhXuIiIiXthbWk1+WTUBNoPh6TFWx/F5tvpqEnf8G4D9Tn3QIc07uaFw/2ZvPfXOpjsJyWHcfe6g/dxFuhEV7iIiIl5wj7YPTosiNMhucRrfF7/zMwJqy6gOTaaEcKvjiI8aEm8jOhgO1cKaAk2Xb5EKd5FuR4W7iIiIF1ZqmrxXkhqmyRdknAFoPQBpnt1mcFK6a9R9cZ4K9xa5C/e9q6Gq2NIoItI5VLiLiIi0UllVHVsKXHuRj+mpwr0locWbiTqwAqcRQEHGVKvjiI9zF+7qc2+FqFRI6A+YkLvY6jQi0glUuIuIiLTSqt0lmCZkxocRHxFsdRyfl7z1PQCKMyZTFxxncRrxdRMaCvdV+x1U1KnPvUWaLi/SrQRYHUBERMRfuKfJj+4O0+TL8qCuqs1PtzmqSdz+LwD2J0+E0t3tlUy6qF7RNjIiDXYfMvl2Xz3ZPQOtjuTbsk6B715X4S7STahwFxERaYXaeidr9pQA3WCafFke/Pv6YzpFvFFCgK2CajOQ0m/+jqe/PUAzFeTIJqQH8N6mOhbtcahwb0nmyYABBzbCof0QmWx1IhHpQCrcRUREWmHDvjJq6p3EhgWSldDFV0d3j7QPvxDCk9p0iuT1T0MF7M84A9JOd90ZEAzhCe0UUrqik3o0FO7qc29ZWBykDIP8tZC7EIZdYHUiEelAKtxFRERaYcVhq8kbRjdZHT08CaLTvX9a6TYiK3JxGgEc6P1TCI5p/2zSJZ2YZscANhc5Kah0khSm5ZiOKusUV+Ge85UKd5EuTv83FBERaYFpmqzc1dDf3tWnybeD1N3/A+BgyknUqWgXL8SF2hiS4Hp7uniPRt1blDXRdak+d5EuT4W7iIhIC/aVVlNUUUtwgI0hadFWx/FpgdVFxOe7tqfa1/Msi9OIP/phWzjt596iXuPBsENxLhTvtDqNiHQgFe4iIiIt2LCvDIBh6dEEBehP59Gk7PkCm1lPWcwgKqL6Wh1H/NDJPVyF++K8ekxT28IdVXAkpI9xXdeou0iXpncfIiIiLdiw11W4H5epvciPxnDUkrznS0Cj7dJ2x6XYCbJDfoXJ9hKn1XF8X29NlxfpDlS4i4iItCC/rBqb0Q22gTtGCflfE1hXRk1IIkWJ46yOI34qJMBgbIodgIXqc29Z1imuy5yvQTMURLosFe4iIiKtMCg1iogQbcZyRKZJ6i7XonT5GWeAzW5xIPFnExqmyy/aoz73FvUYC/ZgKM+Hwq1WpxGRDqJ3ICIiIq1wvKbJH1VU8feEl+/EYQtmf/ppVscRP3dyjwCeWlbD0r311DpMguzdZAvGIynccvTHk4fA3pX/3959x0dR538cf832Te+VQELvvQgoiCLYu2Iv59kOPeud5fQ8PU89+/3Qs+LZu9hAsVCkI9JrQkJCQgrpPdvn98dAFCmhbDKb3c/z8djHluzOvgOT3fnMt8G692DAeW1vzxoJ8T38k00I0SGkcBdCCCEOoqbZ1Xp7ZDfpJn8oqYWzAahIm4TXHKFzGtHZ9Ys3kGBXqGxR+aXMy7j0ED1kNdu161nXH97zlz6vXQ7HrWukeBeiEwnRT0EhhBCibSt3VAOQHmMnPsKqc5rAZWsuJbZiFQClXc/UOY0IBgZF4YQuJj7f7mbxLk/oFu5R6XDeK+BuOfTzagq0gt0cBlMeBeUQo2HrimDxM+Bs8GdSIUQ7C9FPQSGEEEGrKs9vB6TLt1UAMCAtyi/bC1ZpBZ+joFITPxxHeBe944gAknsMs8J3j9aKz+/zPZzR3Uu4BbKiQ3DuhKj0tp8TmwU/vwruZkCBeFmKUYhgI4W7EEKI4FGVBzOG+2VTDaqd9c5XABP9o2Vm64OxOKpILJkPQHHWBTqnEYFi7zyOt89vo6X4MOTV+ThzVhMACy4JD83ivS0GI6QMhqIVULoWEnrpnUgI4WdSuAshhAgee1vaT7gLojOOaVMLiqx4Vmpfk0k2mdn6YFJ3folB9VAf04+G2AF6xxEBIjXCwLOTbDiO8ZzXjDVOSptUJncz8uNOL02utl8TstKGaoV7yVoYdLHeaYQQfiaFuxBCiOATnXHMXUW/W9MMSEv7oZhc9SQXfw9AcdaFOqcRgSY14thXHR6VauSrXA9VLbI+eZtSh2nX5VvB7QCzTd88Qgi/knXchRBCiN9xeFQWFknR3paUojkYvQ6aIrOojffPEAUhfmtwotYtfnvN0Y+VDxlRaRCeCD4P7N6kdxohhJ9J4S6EEEL8zrJiD01uiLdKsXAwBk8LqYVzACjOvACUEF9nW7SL3rEGrEZodOudpBNQFEjb0+peulbfLEIIv5PCXQghhPid7wq01vbjEmRA7cEk7/oOk6eRlrA0qpLH6h1HBCmzUaF/vByuHrbUodp1yTo9Uwgh2oF8EgohhBC/4fGpfL+ncB+bJIX7gSg+N2k7vwSgOPN8UGSWb9F+9naXF4chdQigQO1OaK7WO40Qwo+kcBdCCCF+Y2WJlxqHSqxNYVCMjHM/kMSS+VhcNTit8VSmTtQ7jghyg5N+LdwdHpmk7pBs0RDfQ7tduk7XKEII/5LCXQghhPiNb/K1wbRTM00Y5VtyP4rXRZcdHwNQknkeqsGsc
yIR7FLDFWKs2u2NlbI0Y5tau8vLOHchgokckgghhBB7eH0q3+VrreyndZeC9EBSds3F6qzCaUtgd5epescRIUBRFHrFaq3ua3dL4d6m1gnq1oMqPRSECBZSuAshhBB7rCrzUtmiEm2FcWkyrvb3DJ4W0vM/A2BX92nS2i46TK9Y7ZB1TbkU7m1K6g9GK7RUa2PdhRBBQQp3IYQQYo9vd2jd5E/pZsZslOXNfi+18GvM7jpawtIoTz1J7zgihPSI0Q5ZdzWoFDfIMo2HZDRD8gDttswuL0TQkMJdCCGEAHyqyrd7usmf3t2kc5rAY3I3kLbzCwCKelwKBumRIDqO3fTribTFu2TSyDalDdWuZT13IYKGFO5CCCEEsGa3l/JmlUgLjO8ihfvvpRV8gcnTTFNEJlXJ4/WOI0LYIinc25a6Z5z77k3gdeubRQjhF1K4CyGEEMA3O7RiYHI3M1bpJr8Ps7OGlMLZABT1vAwUOXwQ+lmyy4PXJ5OuHVJsJthiwOOE8i16pxFC+IF88wohhAh5PlVtHd9+mnST3096/icYfU4aontTkzBK7zgihIWbod4F6ytkkrpDUhRIH67dLlmjbxYhhF9I4S6EECLkrS/3UtqkEm6GCdJNfh/2xiKSd30HQGHPK7SCQAidDE3S5lZYVCSFe5vSRmjXxav1zSGE8Asp3IUQQoS8b/d0kz+pqwmbSQrTVqpKZs4bGFQv1YmjqY8brHciEeKG7SncZYK6w5A2FFCgpgCaq3QOI4Q4VlK4CyGECGmqqvJNvtZN/vTusi75b8XUbSamai0+xcTO3tfqHUcIhidrhfu6ci91Thnnfki2aEjopd0ulu7yQnR2UrgLIYQIaesrfOxqULGbYGKGdJPfS0Elc+dnAJR2OwtHWKrOiYSApDADPWIMeFVYXiyt7m1K39NdXsa5C9HpSeEuhBAipH2dq7W2n5JpIsws3eT3SlGqsTvLcVliKM66SO84QrQ6Yc88FD9Jd/m2pe2doG4t+GReACE6M2laEEIck/zKJpqc/j14CreayEoI9+s2hTgQn6oyO08r3M/qId3k9zI7a+iiVAJQ2PNKvKYwnRMJ8auJGUbe3ASLijyoqooiEyYeXEJvsESAqxEqcyCpn96JhBBHSQp3IcRRy69sYtLTC9tl2wvuPlGKd9HuVpV62d2sEmmBCdJNvlVGzpuYFB+NYV2pSJukdxwh9jEm1YTFAMWNKvl1PrrHGPWOFLgMRkgbBgWLte7yUrgL0WnJUYoQ4qjtbWmfPqkn6TF2v2yzuLaFFxfk+r0VX4gD+XpPa/vUTDNWo7TaAUSWryKpaC4ABd0uBEVG1YnAEmZWGJliZFmJl0W7vFK4tyVtuFa4F6+GoZfrnUYIcZSkcBcigPm7G3p7dUFPj7FL67jodDw+tXUZuLN6Sjd5AMXrpMfy+1BQKfdF0xDZQ+9IQhzQhAyTVrgXebhmoEXvOIEtfc8498rt4KjTN4sQ4qhJ4S5EgGqvbujSBV0IzfISL1UOlTibwrg0abED6LJhBvb6HbiscRQ0J+gdR4iDmpBh4omVTpaXeHB6Vekxcyhh8RCbqa3nXrIOotN1DiSEOBpSuAsRoPzdDV26oAuxr72zyZ/W3YRZDvoJq95C+uZXAMgfcAveVZ/onEiIg+sXZyApTKG8WWVVqZfju8gh7SGlj9hTuK+Wwl2ITko+5YQIcNINXQj/c3pV5ubLbPJ7KT43PZbfg6J6qep6GtUpxwNSuIvApSgKJ2aY+DjbzYJCjxTubUkbAZs+g+I10PdMvdMIIY6CzDgjhBAi5Cwu8lDvguQwhVEp0k0+dcvrRFRvxmOJJn/0P/SOI8RhmdRVK9YXFElPsjYl9QOTDRy1UF+idxohxFGQwl0IIUTI2Tub/Bk9zBgNod1N3l67nYz1/wEgf+SDuO2JOicS4vCMTzdhMsCOWh8763x6xwlsRjOkDtFul2/VN4sQ4qhI4S6EECKktLhVfijYM5t8j9DuXmvwtNB70a0YfC5q0iZS2f08vSMJcdiirNqycAALpdW9bekjtevyLfrmEEIcFSnchRBChJQfd3po9kCXSIWhSaHdTT5z1SOE1eXgsieSN+5JUEK794HofCZl7OkuXyiFe5u6jNKuawp0jSGEODpSuAshhAgpn2/Xusmf29OMEsKFakL+lyTnfoSKwvbxz0kXedEpndRNK9yXl3hocas6pwlw4QkQmwXIv5MQnVFo9xEUQggRdPJ9KTTVmMDn3e9ndU61tUvtgAQjmyr2f85v5TYEZ4u8rT6f7iseAGDX4FuoTx2ncyIhjk7PGAPpEQrFjSrLSzyc1E1WiTikLqOhJl/vFEKIoyCFuxBCiKCRX+thkutZmAfQdMjn3vxDy2FsMRoAmzl4WuYVr5Nei/+M0dNEXfIYdg36s96RhDhqiqIwqauJd7doy8JJ4d6GjFGw8SPttk+GFwjRmUjhLoQQImg0ubSZpaf3aSI9KX6/n/93rZNdjSpndjcxLv0wvgIby7FtfIfUiIf8HVUfqkr3lQ8QUb0ZtzWO3OOfA0Nw9ioQoaO1cC/yoKpqSA+BaVN8L7CEg6sJyjZCl5F6JxJCHCYp3IUQQgSd9DAvWTH7TuNS0uhjV6OKQYEze5qJth7Gwb3iBaW6nVJ2vC4bZ5CU9xmqYmT78c/hCkvRO5IQx2xsmgmLEXY1qOTV+ugZKyejDspghKT+sGsVFK6AkdfqnUgIcZhkcjohhBAhYfEurVvo4ETD4RXtQSYhbxYZ658HYMfoR6hLO0HfQEL4SZhZ4bhUrViX2eUPQ9IA7bpwmb45hBBHRAp3IYQQQc+nqizdpU1Ed0KX0OtsFlW6jB4r7gOgeMCNlPe+VOdEQvjXpK6yLNxhS+yjXdcWQvUOfbMIIQ6bFO5CCCGCXk61j4oWFbsJRqaEVjdae+12+vx0Mwafm8puZ1A47C96RxLC7yZ11SalW1XmpcEly50dktn+6+2c7/XLIYQ4IlK4CyGECD7qvgfue7vJj041YjGGTjd5e002/X+8ApO7gfrEEeSOfxoU+eoXwScz2kBWtAG3D5bsklb3w5YzV+8EQojDFHr9BYUQQnRermYoXQ9VuVBT8OuluRLcLfRzNJBjdWLe5sOba8drCqfJGM2q6vsBG+ealhNbEUZLWBccYSlBXcSGV22k37xrMDtraIrtS/aJr6AarXrHEqLdnNzNxOsbXPy408Np3WVZuMNSsAScDWCN1DuJEKINUrgLIYQIXPWlsGOBNgPyrl9g92ZQvQd9uhHY26Bu8jRj8jQzz5tFg2ojlSrOKfkPhlKtNd5jCqc+pj/1sQOojx1IU2RW0CyNFln+C33n/wGTu5GG+CFsO/l/eKwxescSol3tLdwXFHrw+lSMhtDpXXNUotKhvhh2LIR+Z+mdRgjRhoAu3P/xj3/w8MMP7/NYnz592LZtGwAOh4O77rqLDz/8EKfTydSpU/nvf/9LcnKyHnGF6PTK6hxsLqlje3kj23c3klveQGWjC1VVUdF6H5uMCt3iw8hKCMdu1oqcFtfBC6mjlVve
6LdthVtNZCWE+217oh35vFC8GnK+g+3fQ9mG/Z8TkQLJAyA2c8+lm/aYJYyc3O1cObuJewY1kZUYgcnTxP82xEAtnBRfTXXE8dibd2FrKsbkaSKuchVxlasA8JgiqEkcSVXScdTFD8PXSVuno0qX0nfBDRi9LdQnjWbbpNfwWqQ1TQS/kclGoixQ7VBZW+5lZEpAH+bqr+s42PQJZM+Vwl2ITiDgP9EGDBjAjz/+2HrfZPo18h133MGcOXP45JNPiI6O5pZbbuH8889n6dKlekQVolMqrGrm202lfLOpjPVFtYf1ml01LSzNrWq9/69vttA3JYphXWMY3jWW1GgbinJ0LR02s9Z1+faP1h3V6w9mwd0nSvEeqFQVStfBxk9h02fQUPqbHyqQNgy6jYMuo6DLSK2V6CD7l6ushd1U4DCZcISnUN3i45daBwAjBg9ie8QQ7Yk+L+ENO4iq2UxUzSaiardg8jSSWLqQxNKFeA0WahOGUxk1iBp8dIqprlSV1K0z6brmSQyqh9q0CWRPfAmfyd72a4UIAmajwqSuJr7M9fDjTo8U7m3pNlYr3HPmaidNg6THkRDBKuA/0UwmEykpKfs9XldXx8yZM3n//fc56aSTAPjf//5Hv379WLFiBccdd1xHRxWi0/D6VL5eX8LMJflsLK5rfVxRoE9yJL2SI+mVFEGvpAhSom0YDQoKCooCDreXgqpm8isbWVtYy7K8KnwqbCmtZ0tpPe+tLKRbXBinDkxhXI8ELKYjG0OcGm3n2YuH4HD7/PK7Fte28OKCXJqcMllRwGkogzXvwIYPtTHre1mjoefJ0GsK9JwMEYlH/RY/FXlRgb5xBlIjfrMvGow0RfeiKboXpZnnguolsnYbceUriCtfgc1RQXz5CuLLV+A2GKnaNIOKAX+gMX7wQU8a6MnorKPnsr8Qt0s70V2ZeRa5456UMe0i5JzczawV7gUe7h2jd5oAlzoEbNHaHCFFP2uFvBAiYAV84b59+3bS0tKw2WyMHTuWxx9/nK5du7J69WrcbjeTJ09ufW7fvn3p2rUry5cvP2Th7nQ6cTqdrffr6+vb9XcQIpDM31bOnz9Yy47KJgAMCoztEc9pA1OZMiCZpEhbm9sYmRkHwKbiOs6csYS7TulNZaOTNYW1bCmtZ2d1M68s2sEHPxcyuV8yk/snExtmOeyMqdHSQhi0fD7IXwi/vAHbvvl1vLrJBn1Og0EXa0W76dgLTp+qsrBIO2FzYtc2vu4UIw2xA2iIHcDO3n8grCGfhN2LSSyej8VdR0rh16QUfk1LVHcqup9HRfdzcYWnH3NGf4ioXE+vRbdgayrGZ7BQMPIBdve+PCBPMAjR3iZmmDAZILfWR0Gdj8zo4J2A8pgZTND7VNjwEWybLYW7EAEuoAv3MWPG8Oabb9KnTx9KS0t5+OGHOeGEE9i0aRNlZWVYLBZiYmL2eU1ycjJlZWWH3O7jjz++39h5IYLdlhKtZf3ZH3IAiAkzc/0J3blkVAbxEcdWJMVHWBmZGcepA1NpdHhYkF3Od5vLqGpyMWttMV9vKOGMQWmcMzQNm1m64oUkVxOsex9W/Beqd/z6eMZxMOJqbXyln2c13lblo7xZW7t9TOoR7HeKQnNUdwqjulOYeBLRK54kMaMXcbuXYa/fQdd1z5Cx7lnqU46jovv5VHU9FZ+544dhmBzVdNkwg+Sc9zCoHhwRXcmZ8AJN8QM7PIsQgSLaqjA6xciyEi8/7nTzx8HS6+SQ+p75a+E+5VE54SdEAAvowv20005rvT148GDGjBlDt27d+Pjjj7Hbj75F7r777uPOO+9svV9fX09GRsYxZRUiUNU73Ly5rIDledqY9CibiZtP7MmVY7sRYfX/R0CEzcRZQ9I4fVAqqwqq+WZjKdvLG/liXTELs8u5eFQGE3slYpDZfkNDQxn8/KrWwt5Soz1mjYYh02DEtZDcv93eekGh1to+Ns2IzXSU+5tioI4I6obeiyEqhfid35K443Oid68gumw50WXLyVr5d6q7nUpF9/OoSx7b7uNEFa+TlG1v02XjC5jcDQBUdjudHcc9htcS1a7vLURncHI3E8tKvMzb6ZHCvS09T9Z6PNUUQPkWbeJPIURACujC/fdiYmLo3bs3ubm5nHLKKbhcLmpra/dpdd+9e/cBx8T/ltVqxWqVD3IR/FbmV/HGknzqHR4MCvhUmHn1KEZlxbX7exsNCsd1j2dMVhy/FNTw3s872V3v5NVFO/h+cxnXn9Cd7okR7Z4jpFXlaevz6qFxN6x9F7K/BZ9beywqHQZdpHXNNNvB64KSdf5939pCwE6LB1aWat3wJ7XVTf4w+cwRVPS8iIqeF2FpLCYx/wsS8z7D3lBA4o7PSdzxOS57EtVdTqYmYwp1Kcf5dYy5uaWChPwvScl+B1tjEQBNsf0oGHE/9anj/fY+QnR2k7uZ+edyJz+XeqlzqkRb5UTxQVnCofskyPkWts2Rwl2IANapCvfGxkby8vK48sorGTFiBGazmXnz5nHBBRcAkJ2dTWFhIWPHyhgdEdpcHh+vLd7BktxKALrE2jl7SBr/XZiH3dKxXdUVRWFUVhxDu8bw3eYyPl9bTEFVM3//cjPnDU/nnKFpmAwyBtHvqvJgxnC9U+yrvhiWPq9d2osvE3iM9fU23D7oEqnQI8b/+5crIp3iQdMpHvgnIirXkbhjFgkFs7G0lJOy/QNStn+AxxxBXeoJ1CeNoiFpOM2x/VAN5iN6H6OrnpiSxSTu+IyYksUoe+YEcNmTKBx6FxXdz5eZoIX4nW7RBnrFGthe42NhkYdzeh7Z313I6XvGnsJ9Nkz8q95phBAHEdCF+913381ZZ51Ft27dKCkp4aGHHsJoNHLppZcSHR3Nddddx5133klcXBxRUVHceuutjB07VmaUFyGtusnFM99ns6OyCYMCZw9J5/zh6eyqadE1l9lo4MzBaUzolcgbS/NZmV/Np6t3sbawhptP7El6jExI51d7W9pPuAuiO2AokKNOW1KoaCWoe1YESOgNvaZCfI/2f/+9akwwD1ZXWQGVSRmmo16a8LAoCo2Jw2hMHEbByAeI2r2SuKLviSuah6VlN/GF3xJf+C0AXqONpvhBOCK74QxPxRWWiissBdVgQvE6MXhdGLxOrE3FhFdvJrx6C7bGwn3eriFhGBU9zqci6zx85rD2+72E6ORO7mZie42LHwvcUri3pc9poBigdL3Waymmq96JhBAHENCF+65du7j00kupqqoiMTGR448/nhUrVpCYqC0N9Nxzz2EwGLjgggtwOp1MnTqV//73vzqnFkI/eRWNPPN9NjXNbiKsJu44pTf9U/cd85pb3ui39zuabUXZzdx2ci+W5VXxv6X55FU0cd+sDVw1NpOT+ya1b5EViqIzIL5n+23f1aitvb7lK/DuWa0jbRgMuRSS2m/8+kH5vEATxY0qRgWO79JxX3Oq0Upd2gTq0iaQP/oRIqo2El2ymMjKNURWrMXkqiOqfBVR5auOaLuOyG5UdjuDiu7n44ju3k7phQgup3Qz8fI6FwuLPLi
9KmajfLccVHgCdB0LO5dqq30cd5PeiYQQBxDQhfuHH354yJ/bbDZefPFFXnzxxQ5KJETgWp5Xxcs/5eHy+kiPsfOXqX1Ijvp1aTebWesufPtH6/z+3nu3fbgURWF8zwT6pUbxyk95bCiuY+aSfPLKG7l2fNYRr/0udODzaOMhN3z4a+t+Yj8YcU3AjJEcmWIkSq+xrYqBxoQhNCYM0e6rPmz1+URUbcDaVIKlqQRrcxmW5jJARTVY8Bmt+IxW3NY4muIH0hTbn+a4/nisMfr8DkJ0YkOTjMTZFKodKqvKvIxLD+hDXv31PWNP4T5bCnchApR8igkRBOZt283ri/MBGJoRw60n9STMsu+fd2q0nWcvHoLD7fPre9vMhqNedz0u3MK9p/Xl6/UlfPhLEQtzKiiqaeaOyb2PeYk60Y6K18CqV6Ful3Y/uisMvxoyRuu+lJDDo7bebnPt9o6kGHBE98AR3YHDBoQIYUaDwkldTXya4+b7Ao8U7m3pewZ8dz/sXAbN1RDW/pPYCiGOjHyKCdHJ/bZon9I/mavHZh50qbWjLbDbk6IonD00ncyEcGbMzyWvoon7P9/IbZP37+YvdNZQBqteh6IV2n1bNAy7EnqeEjATpP1UpC0BF2dTGJwYwD036ovB7ed5J8x2beZ+IQQAU7L2Fu5uHhpnlaFYhxKbCcmDYPdGyPkOhl6qdyIhxO9I4S5EJ/bbov20gSlceVy3TntgMrhLDP86dyDP/pDDzupmHvtmKzdP7MH4ngl6RxM+D2yaBes/0JZ2UwzQ7yxtHLslcJb0U1WVOTu0wn1MqhFDoP4t1BfDrBvaZ9vnvyrFuxB7TOhiwm6CkkaVjZU+BicGxgnGgNX3DK1w3zZbCnchApAU7kJ0UsFUtO+VFGXj4XMG8NLCPFbmV/PCglyqm1ycOTi10/9unVZFNiyfATUF2v3UITD6xoCcdXjNbi876rShICOSA/gAfW9L++CLITzJP9tsKocNH/u/FV+ITsxmUpjU1cQ3Ozx8l++Wwr0tfc+An56A3HngatLWeBdCBAwp3IXohJbkVrYW7acPTOGKICja97KajPz55F68u2In324q4/2fC6lqcnHVcd0OOgRAtAO3A9a+DVu/BlSwRsGo66H7ibqPYz+Ydza7W2+HmQMz4z7CkyBaWseFaE9Ts8x8s8PD3HwPfxmtd5oAlzJI6zJfU6B1lx94vt6JhBC/EcADAIUQB7KltJ6Xf8oD4NQgK9r3MigKV43N5MrjugHw3eYy/jNvO26vfyfWEwdRvhW+vhW2fgWo0H0SnPsS9JgUsEV7ZYuPb3a4236iECKkTMowYTZAXq2P3Bqv3nECm6LAgPO025s/1zeLEGI/UrgL0YkU17bw7A/ZeH0qx3WPC4ru8Ydy+qBU/nxST0wGhZ8Lqnn6+2ycHjnwajdeN6x5B+beAw2lEJ4Ikx+BE+7SJqILYB9tc+PyQe9Y+VoTQvwqyqowfs+M8nPzPTqn6QT2Fu7bfwBno75ZhBD7kCMcITqJuhY3T87dRpPTS6+kCG6e2DNwJ9/yo7E9Erjn1L5YTQY27KrjybnZONxSvPtdbRF8czds/AhUn9bKfvYMSB+ud7I2eX0q729xAXBGdxkBJoTY16lZewt36ZXTppTBENcdPC2w/Tu90wghfkMKdyE6AZfHxzPfZ1Pe4CQp0srdU/pgMYXOn+/A9GjuPa0vdrORLaX1PP7tVppd0nLiN3nzYM7tUJ0H1kiYeK/Wyh5AM8YfyvxCD8WNKjFWhRO6SOEuhNjX5EwTBgU2VfrY1SBDrg5JUaD/udpt6S4vREAJnSN/ITopVVV5dVEe28sbCbcauefUvkTZzXrH6nB9U6L42xn9CLcaydndyKNzttLokOL9mHgcsPQ/sOQ58Di1GePPfhEyj9c72RF5Z7PW2j6trxmLMfh7oQghjkyC3cDIFG1G+e+k1b1t+3SXb9A3ixCilTRNCBHgVuZXszSvCoMCd57Sh7QYu96RdNMjMYIHz+jPv77ZSn5lE499u5W/nd6PcKt8lB2x2iL46XGoLQQUGHoZDLoYDJ1ruaTcGi+LdnlRgMv7W6h3qnpHEkK0o9zao2sxH5Jo5OdSL7Ny3IxJ/fU7I9wCWdGd63Ov3aUMgrgeWi+snO9g0IV6JxJCIIW7EAFvzsZSAC4b3Y3+qVE6p9Fft/hwHjyjP4/O2UJ+ZRP/nruNe0/rS5hFPs4OW+FyWPysNobRFgMT/qK1tndCr2/QWtunZJroGmVgU4XMfyBEMLLt+Yi/fX7LMW1nc5WPM2c17fPYgkvCpXj/rb2zyy9+WusuL4W7EAFBjnSFCFANDq07n9enMiozltMHpeicKHBkxIVx/+n9eHTOVraXN/Lk3GzuPa0vNrMceB2S6oP1H8L697X7KYNgwl/BHqtvrqNU3uxjVo72d3L9EIvOaYQQ7Sk1wsCzk2wcywipF9c6KW5UOa+niVGpJoobfby41kWTy385g8bewn1vd3lrpN6JhAh5UrgLEYB8PpVnf8gBIC7cwk0TewT1sm9Ho1t8OPef3o9/zdlC9u4Gnvxu257Z56V4PyB3s9bKXrRCu9/vLBh5HRg679fAO5tduHwwLMnIiGT5fxci2KVGHNvUTMd3MfHRNjd5dT4u7ifTPB1S8gCI7wlVuZA9FwZfpHciIUJe5z1iEyKIvfRTHqsKagC4bHRX6QZ+EFkJ4dx3ej/+NWcrW0sbePb7HO6e2gezUQ7I9tG4G+Y9rI1nN5jguOnQ6xS9Ux2TZrfKO5u11vYbhljkxNZedUWBtR0hAsiYVCMfbXOzqdIn82G0ZW93+UVPad3lpXAXQndSDQgRYNYV1ba2tgMhPRnd4eiRGMG9p/XlsW+2sqG4jhcX5PLnk3phMEghB2jF+i9vgKMW7HEw6X5I7Kt3qmP2WY6bWqdK1yiFKZnyVYbJql0verp9titEEEiNMNAtSmFnvcqqMi/do+Uk7yHtLdxzfwBHPdhknh0h9CRHO0IEkBaXlzs/WofXpzKhVwKLtlfqHalT6J0cyZ2n9ObJ77JZmV/NzKX5/PH4LGmFBVj+AnhdEJsJJ/8DwhP0TnTMvD6V1zc4AbhukBWjnKTR/l9PuEtb1s9fTNag2F+E+K1x6SZ21rtZXuKhe7TMjXFISf0hoTdU5sC22drqI0II3UjhLkQAefzbreyobCI5ysrNJ/aQwv0IDO4Sw62TevKf+duZv62cCKuJS0d31TuWfjZ9pl17XZA2HCbeC5YwfTP5yQ8FHnbWq0Rb4aI+Zr3jBA4psoVo03FpRj7Y6mZLpY8Gl3SXPyRF0ZYJXfCoNrGpFO5C6Er6CAkRIH7KqeDt5TsBePqiIUTapCA5UmO6x3P98d0B+Gp9CV+tK9Y5kQ5UFb5/EJb9n3a/61g4+e9BU7QDvLZnCbgr+1sIM0truxDi8CWFGegRY0AFNlXK8pFt2ju2PX8R1Jfom0WIECeFuxABoLbZxV8+WQ/A1WO7cUKvRJ0TdV6T+iZx+Ritpf2DVUWsyq/WOVEH8nrgq1t+LdpBay3pxDPH/9
6qUg+rd3uxGOCqgdLNVQhx5MalaatQbKiQwr1NsZnaCWBU2Pip3mmECGlSuAsRAB74YhPlDU66J4Zz72n99I7T6Z05OI1zh6YB8EWotLq7HfDJ1bD2XVAM2vrsoHV1DCL/Wa2N4b6gt5mkMPkKE0IcuTF7Cved9dJV/rAMvli73vCRvjmECHFy1COEzuZuKmX2hlKMBoXnLh6K3SLrUfvDxSMzmNwvib2HZat31uiap1056uG9C7XJg4xWuPgd6HuG3qn8bnWZhyXFXkwG+NNwme1cCHF04u0G+sTJIfBhG3AeGC2wexOUbdI7jRAhSz61hNBRXYubB7/cDMDNE3swJCNG30BBRFEUrh2XxeAu0QA89s1WfikIwm7zzdXw9tlQsBgskXDFZ9DvTL1TtYvn97S2X9jbTEakfH0JIY7e2DQ5SX7Y7LHQa4p2e+PH+mYRIoTJkY8QOnri261U7Okif8tJPfWOE3QMBoULR3QBwOnxce2bq9haWq9zKj9qqoS3zoaStRAWD9fMhqwT9E7VLlaXeVi8S2ttnz5MWtuFEMdmTKqJvQOJypp8umbpFAZP0643fAI+mRtACD1I4S6ETpbnVfHBz0UAPHH+YGxmOfvfHkwG7WOuf2oUDQ4PV73xM4VVzTqn8oOG3fDmGbB7I4QnwTVzIG2o3qnazd6x7ef3MpMRJV9dQohjE2NTyIrWPkuW7PLonKYT6D0VbNHQUAIFS/ROI0RIkqMfIXTgcHu5//ONAFw+piujs+J0ThT8/n5Wf/qmRFLR4OSKmSspb3DoHeno1RXDm6dDxTaITINrv4Wk4J3UcO1uD4t2eTEqcIuMbRdC+MngRO0weNEuaUFuk8mqjXUHmaROCJ0EzxpBQugsv7KJJufhnbV/a1kB+ZVNxIVbOHtIGpuK6/Z7Tm55o78jhrQIq4m3/zCaC19eTmF1M1fN/JmPbhxLtN2sd7QjU7dLa2mvKYDornD1VxCXpXeqdvWf1dq67ef1MtNVWtuFEH4yMMHIF7kedtT52F7jpVes9Hw7pMGXwOo3YctXcPrTYAnTO5EQIUUKdyH8IL+yiUlPLzzi11U3uZj26opDPsdmlkLFX5KibLx73RgueHkZ28oa+ONbq3j7D2M6z0z+9SXw5pla0R6bCVd/DTFd9U7VrtaVe1lY5JHWdiGE34WZf10uc1aOm3vGdJLvAr1kjNG+c2oLIfsbGHSh3omECClSuAvhB3tb2qdP6kl6jP2gz/OpKq8u2kFhdTMD0qK4fEy3Q27XZjaQGn3w7Ykj1zU+jLf/MJqLX1nOqoIapr+/hleuHIHZGOAnSOpL9xTt+RDTTRvTHt1F71TtSlVVnlipDWk4t5eZzOgA/z8SQnRaX2x3c/coK0aD0vaTQ5XBoE1St+gpWPuuFO5CdDAp3IXwo/QYO1kJ4Qf9+U855RRWN2M1Gbh5Yg/iI6QFUQ/9UqN445pRXDlzJfO3lfPXTzfwzEVDMATqAVtDGbx1FlTnaa0d18wO+qIdYH6hhxUlXixGuGOk/K0IIdpHuBlKm1RWlHgZ30UOjQ9p2BVa4b5jwa+9v4QQHUKaL4ToII1OD++vLATgwhFdpGjX2ajMOF66fAQmg8Lna4t5ZPYWVFXVO9b+9i75VrUdojPg6tlB3z0ewONTeWyFNpP8tQMtdJF124UQ7WTCnmL9s+1unZN0ArGZ0P1E7fba9/RMIkTIkSMhITrIx78UUe/wkB5j59SBKXrHEcCkvkk8fdEQAN5cVsAL83N1TvQ7jjp45zyozIaodG1Me+yhh1cEi4+2ucmr9RFrU/iTrNsuhGhHJ3XVCve5O9w0uQPwBG6gGX6Vdr32XVnTXYgOJP2BhOgAOyoa+XHLbgCuHZ/Zura40N+5w9KpaXbx8NdbeOaHHKLDzFw1NlPvWOBqhvenQdkGCE+Eq4J/9vi9Gl0qz/2itbbfNsJKtDUAhjDUFQXmtoQQx6xvnIHMKAMF9T6+y3dzfm+L3pECW98zwR6rremeOw96T9E7kRAhQQp3IdqZT1V5Y2k+KjCuRzwD0qL1jiR+59rxWdQ0u/m/edv5+5ebCbOYuHCEjmPIPS74+EooXA7WaLjyc0joqV+eDvbqeieVLSqZUQYu66fzcn2mPa39i55uv20LIXSlKArn9zbz7C9OZuVI4d4mkxWGXAor/gtr3pLCXYgOIoW7EO1sYXYFeRVN2M3GNmeRF/q5Y3IvGhxu/re0gL9+up4wi5HTB6V2fBCfF2ZdD7k/gjkMLv8EUgZ1fA6dlDX5eHWDtm77vWOsWIw6t7aHJ8AJd4HH6d/tmqzatoUQAeG8XlrhvrTYS2mjj9QI6Rl3SMOu1Ar3nLnQsBsik/VOJETQk8JdiHbU5PTw0SptQroLhnchLlzO4uslt7yxzeecPyyd4poWvt+ym1s/WMvuOgejsuIO+Nxwq+mQKwgcsao8cNRrs/VmzwGDGU55RCvwStYd+fYqc/yXrQM99bMThwdGphiZmhUgX1FSYAsR9DKiDIxOMfJzmZcvct3cPFR6xBxScn/oMgp2rYL1H8Dxt+udSIigFyBHRUIEp1lri6l3eEiLsTF1oJyN1oPNrLWa3P7RuiN6nden8vDsLYd8zoK7T/RP8V6VBzOG7/uYzw3f3H3s2zbbj30bHWR5iYfPcrRZnf92nBVFCYCx7UKIkHF+bzM/l3mZlePmpiEW+Qxqy/CrtcJ9zdsw/jaQfy8h2pUU7kK0k5LaFr7bVAbAlcfJhHR6SY228+zFQ3C4fYf9Gq9P5f2fC9laWo/ZqHDNuKx9CvTi2hZeXJBLk9Pjn5DOhn3vD7kUMsYc+3bNdm02+k7A6VX522IHAJf3NzMsWb6ehBAd6/TuZv6+1MH2Gh8bK30MTjTqHSmwDTgP5t4L1XmwcylkHq93IiGCmhwZCdFO3lmxE6+qMiwjhqEZMXrHCWmp0Ufe6nzfaX15+vtsNuyq4+3lBdx7al/6pka1Qzpg06xfb4+6Hvqf0z7vE8BeXudiR62PBLvCX0fb9I4jhAhBUVaF07JMfJnr4YOtLgYndp4eS7qwRsDAC7QJ6la/JYW7EO1MmgCFaAfrimpYV1SLUVG48jiZkK4zMhsN3HVKHwanR+P0+Hhi7ja2ldX7/43WfwTL/qPd7n1qSBbtO2q9vLhGm/ztoXG2wFj+TQgRki7tp81F81Wum0aXrOnephHXaNdbvoDGcj2TCBH0pHAXws88Ph/vLN8JwKkDU0iNkTP2nZXFZOCuKX0YuKd4//fcbeTsbmj7hYcrbz58+adf7/ea6r9tdxKqqnWRd/lgYoaRM3tIRzAhhH7GpBrpHm2gyQ1f57n1jhP40odD+kjwurRWdyFEu5HCXQg/+37zbkrqHETZTJw/vHOMLxYHZzEZuHtKbwakReFw+3j8263kVzYd+4bLNsJHV4HPAz0na4+F4MQ+s7a7WV7ixWqEfx5vl8mghBC6UhSFS/uZAfhgq0vnNJ3E6Bu0619mgldOdgjRXqRwF8KPmp0eZq3ZBcDFozIIs0jrYTCwmoz8ZWqf1uL9zWX5x7bBu
l3w3kXgaoDME2DiPf4J2snsbvLx6HKti/xtI6x0jZKvJCGE/i7obcZigA0VPjZVePWOE/gGnAvhidBQClu/1juNEEFLjpKE8KMF2eU0ubx0jQtjUu8kveMIP7KajPx1al+GZsTg9mrjHlfuqDryDbXUwrsXagc4if1g2rtgtPg3bCfgU1XuWtBCjUOlf7yB6weH3r+BECIwxdkNTM3STrx/sE1a3dtkssKIa7XbP7+mbxYhgpgU7kL40Yod1QBcPqYrBoN0+Q02FpOBO0/Rus0D/OubrXy9vuTwN+BxwkdXQMVWiEyFKz4Fe0z7hA1wMze4WFLsxWaC/zvZjtkofy9CiMCxd5K6L7e7aXLLJHVtGvkHMJigcJk2FEwI4XdSuAvhR15VZUiXaAZ3idE7imgnZqOBS0Z1BcCnwm0fruX9lYVtv9Dngy9uhoLFYImEyz+B6C7tnDYwbar08uTPWhf5B8fa6BkrayULIQLL2DQjmVEGGt0wWyapa1tUKvQ7S7u98hV9swgRpKRwF8IPtpRoy4QpwOVjZPm3YGfc05vi1AEp+FS4//ONPPtDDqp6iFaZef+ATZ9pLRLT3oGUQR0TNsC0uFVum9eC2wenZJq4bM8kUEIIEUh+O0nd+zJJ3eEZfaN2vfETaK7WN4sQQUgKdyGOkaqqzFyiTVY2MjOWjLgwnROJjjJ9Ug/+fHIvAP5v3nbum7URj9e3/xN/fg2W7lmr/ewXoMekDkwZWP653EFerY+kMIV/T7DJLPJCiIB1QR8zZgOsL/exuVImqWtT1+O0k9IeB6x9R+80QgQdmfJaiGM0Z2Mp2XvW9j65X7LOacQB1ReDu8V/26v1AKBU5nDnAAvJvhgeXFjLh6uKqKysYMapsdjNe86LFiyG7x/Ubo/8IyT1g5J1+26vMsd/2QLY5zku3t+qdTl9dpKdOLucOxZCBK4Eu4EpmSbm7PDw7mYXj0+06x3Jv/z53WONhPgeWqv7V7fAz6/DcdPBKKWGEP4if01CHAOnx8u/525rvR9lk26/Aae+GGbd4N9tqinA9TDrejAUcDmQYBrJn9238GM+THt5Ca9aniVFqdn3db+8rl0OxhxkB4W/sbrMwz0/OQCYPszC8V3k60cIEfiuHmhhzg4Ps7a7+esYK7G2IDjhuPe7Ztb1/t3urWtg0IXw40NQVwhbvtDuCyH8Qo6chDgGby/bSVF1C3HhFqqbZAxcQNrb0j74Ygj30xJ9DUZYC5xwN8Rqre9Tgfcqm7h+mYkNrh6czX941fg0Q70btGXfRv0RDIeYhM1sh6h0/+QLMMUNPm78vgWXD6ZkmrhrlFXvSEIIcVhGpRgZmGBgU6WP97e6mT4sCD6/otLhvFf81xOtrggWPwPOBu27bPSNsPAxbYjYwAtAhkQJ4RdSuAtxlGqaXMyYvx2AK4/rxn/mbdc5kTik8CSI9lNhrPoAB0RnQPyvxfjIePgq1cd13zaRU2viYuedPBX9GedMvhIsoTn3QZNb5Y/fNVPZotIv3sBzJ9kxyEGcEKKTUBSFawdZuGuBg7c3ubhhsCU4lq9szxPFo6+Hpc9D2QbYsTCk53URwp+CoL+PEPqYMT+XeoeHvimRnNTXTy25otPLiFT4LPE1JhtW48LCbXWX8vgaAx5f6K0D7FNV7pjfwtYqHwl2hdenhhFuDoIDXiFESDmzh5kEu8LuZpVv8j16xwl8YXEw/Crt9t6JWYUQx0wKdyGOQkFlE++sKADgb2f0a10eTAg2fEhk0XxesfyHm3vVAfDKeheXzW5md9MBZpwPUqqq8vclDr4v8GAxwitT7KRHyleOEKLzsRoVrhxgAeCNDc5DL/0pNMf9CRQj7Fiw/4SsQoijIkdRQhyFf8/dhturcmKfRE7olah3HBEoCpbAuvcAMI69mXtOyuCFyXYizPBzqZfTP21i8a7gb63ZW7S/u8WNAjx1op0RKTIySwjReV3e34zFAOsrfKzZLUvDtSm2Gww8X7u97P/0zSJEkJDCXYgj9EtBNd9uKsOgwH2n9dM7jggUVbmw5Dntdr9zoPdUQOti+fUF4fSLN1DlULlqTjPPrHLg9gZni42qqjy01ME7rUW7jXN6ymoLQojOLcFu4Jxe2mfZGxtlMtrDMu7P2vXmz6GmQNcoQgQDKdyFOAKqqvLonK0ATBuVQZ+USJ0TiYDQXA3z/wleJ6SPgJF/2OfHWdFGPj83nEv7mVGBGWtcnP9FEznVwdVqo6oq/1jq4O3NWtH+5Ik2Luxj0TuWEEL4xbWDtM+zufkeShpDZ+jTUUsdDD1O0iZ0Xf6i3mmE6PSkcBfiCMzeUMq6olrCLEbumNxb7zgiEHicsOBRaK6C6C4w4a8HXPbNZlJ4fIKd/zvZTrQVNlb6OPOzJl5e58QbBBPXubwq9y9y8Naeov3fE21cJEW7ECKI9I83MjbNiFeFtzZJq/thGX+7dr3mHWiq1DWKEJ2dFO5CHCanx8u/524D4MYJPUiKsumcSOhOVbWxe5U5YI2Ek/4OlvBDvuTsnma+vyiCk7qacPngiZVOLvqqmexO3Ppe1eLjitnNfLBNK9qfmGjj4r5StAshgs8f9rS6v7/VRZ2z8590bXdZEyBtGHhaYNkMvdMI0alJ4S7EYXp72U521bSQHGXl+glZescRASAh7zPI/0mbOXfifRCVdlivSw43MPNUO09OtBFhhjW7tYnrHl7qoL6THQhuqfJy9qwmfi7zEmGGmafamSZFuxAiSJ3czUTvWAMNLnh7s7S6t0lRYOI92u2fX5NWdyGOgUzzK8RhqGlyMWP+dgDumtKHMIv86YS6CYb1JOe8q90Zc6M2lu8IKIrCxX0tHN/FxD+XOfg238P/Nrn4Os/NvWOsnN/bjEEJ7GUGv85189efWmjxQGaUgddPtdMzdv9hAoeSX+elyY/Hvrm1Mu5UCNF+DIrC9GFWbpvfwhsbXfxhkIVwc2B/Vuuu96mQOhRK12mt7qc8rHciITolqT6EOAwz5udS7/DQNyWSC4Z30TuO0Fm4s5z/mF9EQYVeU6HP6Ue9rbQIAy9NCWPxLg8PLXWwo9bH3QsdvLHRxR0jrUzuZkIJsAJ+d5OPh5Y6mJuvLW13QhcjL0wOI9p6ZDnz67xM+rCpPSJik283IUQ7OaOHied+MVBQ7+P9LS6uH2LVO1JgUxQ48T74YJrW6j7uVghP0DuVEJ2OHNoI0YaCyibeWVEAwN/O6IfREFhFlOhYBq+TE3L/TazSSHN0L8LG3OSX7Z7QxcTcC8N5c5OL/1vtZEuVj+u/a2FQgoE7RlqZ1FX/At6nqny0zc1jKxw0uMBkgJuGWLh9pBXTUfxd7G1pnz7MQnqE/0Zu2UyQ6sftCSHEb5kMCjcPs3DPTw5e3eDiygEWbCY5Njik3lO1se4la7W5YU55RO9EQnQ6UriLkJRf2UST03NYz33sm624vSojusUSG2ZhU3Hdfs/JLW/0d0QRiFSVrK0vE9ecT6UaRdXwe+hj9N8a5Rajwg1DrFzUx8xr6128ucnFxkoff5jbQv94A1cNsHBOTzP2Du6W
qaoqy0u8PP+Lk5/LtEn0BicaeGKinf7xR9Y1/kDSIwxkxUihLYToPM7rZeY/q52UNKp8ku3mygEyt8ch7W11f//iPa3uf5ZWdyGOkBTuIuTkVzYx6emFR/y61TtrOHPGkkM+x2aW4iOYJe+aS1LpAnwYuNV9K3+zJ7bL+8TaDPx1jI3rBlt4db2Ltza72FLl495FDh5b4eDCPhYu62c+4vHkR8qnqvxQ4OGldS7WlWsFu90Ed42ycu1Ai/Q+EUKELItR4cYhVh5a6uDldU4u6WvGbJTPxEPqNUVa3YU4BlK4i5Czt6V9+qSepMfYD/o8n6ry8k957KppYVRmLOcNO/TYdpvZQGr0wbcnOreI2m1kZs8EYH2XK1ieO6Dd3zPebuC+42zcNNTCJ9lu3t3iorBe5Y2NLt7Y6KJXrIGpmSamZpkZmGDwS1d6VVXJrfXxfb6Hz7e7Wyd7sxphWl8zNw6xkh4pJ6iEEGJaXzMz1jgpblT5fLtblsFsi7S6C3FMpHAXISs9xk5WwsHX3F6SW8mumhZsZgN/GJ9FTJh8IYcqs7OWPhuexKB6qEoax9aUcyDX2WHvH2szcMMQK38cbGFRkZd3t7hYWORhe42P7TUuXljrIjVcYXiykSFJRoYkGhmYaDysmY69PpWiBpWcGi+ry7z8UOBhR92vM7NHWuDK/hauHWQhMUwKdiGE2MtmUrh+sIXHVzp5aZ2L83ubj2q+j5DSawqkDYeSNbD4WTj1Mb0TCdFpBHTh/vjjjzNr1iy2bduG3W5n3Lhx/Pvf/6ZPnz6tzznxxBP56aef9nndjTfeyMsvv9zRcUUQcXq8fPBzIQDnDE2Xoj2EKT4PvTc8hcVZTXN4F3IH3AqN2oGZP5ceC7dAVvShu74bFIUTu5o4sauJOqfKgkIP3+W7WVjkobRJZc4OD3N2aD1KFCDerpBgV0gMU0gMM2BSwOUFp1fF6YXdzT5ya3w4vfu+j8UA47uYmJJp4ozuZqKOcLZ4IYQIFZf3t/Dyehf5dT4+zXZzST85XjgkRYGTHoB3z4dVr2nLqcZ20zuVEJ1CQBfuP/30E9OnT2fUqFF4PB7uv/9+pkyZwpYtWwgP/7Wl9Prrr+eRR34dJxMWFqZHXBFEZm8opbrJRUKEhdMHpuodR+io6/a3iardjMdoJ3vIffhMdmwmrWC/fX6LX99rwSXhbRbve0VbFc7tZebcXmYcHpXVZV7WV3jZUOFlfbmX0iaVyhbtsq0awHvQbVmN0CPGQN94I5MytBMDkRYp1oUQoi0RFoXpwyw8utzJc784dZlAtNPpeTJ0PxF2LIT5j8IFr+mdSIhOIaAL97lz5+5z/8033yQpKYnVq1czYcKE1sfDwsJISUnp6HgiSFU3ufh6fQkAl43uhsUk3YNDVXzZYtIKvwIgd+BtOMLTAW2psWcn2XAc3sIEbSpu9PHiWlfr8mhHymZSGN/FxPguv36kV7X42N2sUtGsUt7so6JZRUUr0q1GBasRYmwKvWONZEQqMtGcEEIcpSv6W/jfRhfFjSpvbnZx81BZ171Nkx+GVyfCxo9h3C2QOkTvREIEvIAu3H+vrk5bhisuLm6fx9977z3effddUlJSOOuss3jwwQcP2erudDpxOn8dn1pfX98+gUWn9OGqQpweH32SIzmue1zbLxBByd5YRI/NLwCwK/MCapKO2+fngb5OeLzdQLwdiNc7iRBCBDebSeGOkVbuXujgpbVOLutnIVqGGB1a2lAYeCFs+hR+eAiu+kLvREIEvMA+8vwNn8/H7bffzvjx4xk4cGDr45dddhnvvvsuCxYs4L777uOdd97hiiuuOOS2Hn/8caKjo1svGRkZ7R1fdBJ5FY0s3l4JwJVju/lllm7R+Ri8DnpveBKjz0lt3BCKel6mdyQhhBAB7LxeZvrEGah3wX/XdtzkpZ3ayQ+CwQw7FkDefL3TCBHwOk3hPn36dDZt2sSHH364z+M33HADU6dOZdCgQVx++eW8/fbbfP755+Tl5R10W/fddx91dXWtl6KiovaOLzoBVVV5Z/lOAE7omUCPxAidEwldqCpZW18hrKkIlzWO7YPuAKV910sXQgjRuRkNCn8ZpXWRf3OTi9JG/01eGrRiM2HUH7XbPzwEPvk3E+JQOkVX+VtuuYXZs2ezaNEiunQ59FraY8aMASA3N5cePXoc8DlWqxWrVcYfiX2tzK8me3cDFqOBaaOkF0aoSiyZR1LpAlQM5Ay6C48lRu9IRy2/znvU4+YP5nBmvxdCiFB0cjcTo1KMrCrz8p/VTp6YaNc7UseqzDny1/Q5Fda8DWUbYMkz0POUX39mjYT4Ax/LCxGKArpwV1WVW2+9lc8//5yFCxeSlZXV5mvWrVsHQGqqzAQuDp/L4+P9ldryb2cNSSU+Qk7shKKwhgK6b3sVgMKel9EQO0DnREcvv87LpA+b2mXbRzL7vRBChApFUbh3jJULvmzm42w3fxhkoXdcCHxWmvecoJh1/bFtZ/6j2uW3bl0jxbsQewR04T59+nTef/99vvzySyIjIykrKwMgOjoau91OXl4e77//Pqeffjrx8fFs2LCBO+64gwkTJjB48GCd04vO5NtNpVQ0OokLt3Dm4DS94wgdGDwt9N7wFAafi5r44ZRknq93pGOyt6V9+jAL6X6aSO9YZ78XQohgNyLFxJRME98XeHhoqYP3zwwL/vlyotLhvFfAfZRLpHpdsPBxaKmBXlOhz2lQVwSLnwFng3+zCtGJBXTh/tJLLwFw4okn7vP4//73P6655hosFgs//vgjzz//PE1NTWRkZHDBBRfwwAMP6JBWdFa1zS6+XKct/3bJqAxs5hA4Oy72pap03/oS9uZinNZ4cgfeDkrHTwGSW+u/8X17t5UeYSArxr+/i79y+vP3FUKIQPHgWBs/FTWyvMTL7DwPZ/U06x2p/UWlH9vrR98IPz2hTVQ3eBpEy5BFIX4voAt3VVUP+fOMjAx++umnDkojgtXHv+yixe2lR2I443sm6B1H6CCp+HsSyxahKga2D74bjyWqQ9/ftueT+Pb5R9lacRjb9ue2/J3TnxmFEEJvGVEG/jTMynO/OPnXCgcndTMRbg7yVvdj1W08pAzWxrr/MhMGXaR3IiECjhwuiZBWUNXEwuxyAK48LhNDsHdnE/sJa9hBVvbrABT2vIKGmH4dniE1wsCzk2w4PP7drs3k3/Xm2yOnvzMKIUQguHGIhc9yXBTWq/zfaif3HWfTO1JgUxQYfQN8/WfYuRRSh+idSIiAI4W7CFmqqvLuikJU4LjucfRJidQ7kuhgRk8zvdc/hcHnpiZhJCXdztUtS2cpXjtLTiGE0JPNpPDQOBvXzW1h5kYXF/Ux0zNWhuIdUmwm9Dkdts2GzZ/rnUaIgCNHYCJkbS6pZ0tpPWajwmWju+odR3Q0VaX7lv9ibynFaUsgd8CfdRnXLoQQIjid3M3MyV1NeHzwj6WONoeACmDo5WCNgoZSvZMIEXDkKFWErDkbtS+FswankRgpXdhCTfKuuSTsXoJPMZIz6C8dPq5dCCFE8HtovA2
LEZYUaxPViTZYI2HYFb/eb67WL4sQAUa6youQVdfiJiHCwtlDZfm3UBNen0dm9kwACntdRWNMH50TCSGE0Iu/V7gIt0BWtNYtvmuUgel7Jqp7aKmDselGEuzSbnZIvabC1q+gbhcsfwF6nqR3IiECghTuIuQU1/46I/ZVYzOxmmTMWSgxupvoveFJDKqH6sTRlHY9W+9IQgghdNCeK3osuCS8tXi/eaiFb3e42Vbt48HFDv57ij3413Y/FgYjDJoGS56BvHmw/UfoNVnvVELoTgp3EVJUVeXVn3YA0Ds5gpHdYnVOJDqUqtJjywvYWnbjsCXtGdcuB09CCBGK2mOljOJGHy+uddHk+vUxi1Hh6Ul2zv28iW/zPaGztvuxiPnNOu5z7oQ/rQBLmH55hAgAUriLkPL9lt2sLqwB4MzBaXLGO8SkFM0hvnw5PsVEzuC/4DVH6B1JCCGEjjpqpYyBCUamD7Pwn9Uu/r7UwXFpRhLDpMt8m8KToHYn/PRvOOVhvdMIoSv5xBAho8Xl5ZGvt7TeT4iw6phGdLTwxp10y3kTgJ29r6Epupe+gYQQQoSU6cOs9I83UONQeWCxzDJ/WMbfrl0vmwFlm3SNIoTepHAXIeOFBdsprm0hUQr2kGPES+/cmRhUD1VJx1GWcYbekYQQQoSYvV3mTQb4rsDDV7kyy3ybMsdDv7NA9cLXt4HPq3ciIXQjhbsICdllDbyyZ2z79ROydE4jOpTqo6ehBJurCoc9mbz+t8i4diGEELroH2/k1uFaA8IDS1rYWeffGe2D0mlPgiUSin+Bla/onUYI3UjhLoKez6dy/+cb8fhUTumfzLgeCXpHEh0obccnxCmN+BQT2YPvkXHtQgghdDV9mIWRKUYaXPCnH5txeKTL/CFFpcGUR7Tb8x6Gylx98wihEyncRdD7YFUhq3fWEG4x8vDZA/SOIzpQVNkKumb/D4D8bhfTHNVd50RCCCFCncmgMONkO3E2hc2VPv613KF3pMA34lrofiJ4HPDFzdJlXoQkKdxFUCtvcPDEt9sAuGtKH9Ji7DonEh3F3FJBr8V/RsFHuS+a8sRxekcSQgghgL1L0WnHJO9scfN1rlvnRAFOUeDsGVqX+V0/w/IX9U4kRIeT5eBEUHvk6y00ODwMSo/m6nGZescRHcXnodfi27A4KmmOyCS/zirj2oUQQgSUE7ua+NNQC/9d5+K+RS0MTDSQFW3UO1ZgqczZ9/5xN8Gip2D+PyG2G8R0O7LtWSMhvof/8gnRgaRwF0FrQXY5szeUYlDg8fMHYTRI4RYqMtY/R/TuFXhN4WQPfxDfgqf1jiSEEELs585RVn7Z7eXnUi83f9/CZ+eGE26W4xXMe3pIzrr+wD/3uuDjq45u27eukeJddEpSuIugVO9w87dZGwG4dnwWA9OjdU4kOkrMrnl02fQSAHljn8ARkaFzIiGEEOLA9o53P+OzJrZV+7htXguvTLFLY0NUOpz3Crhb9v9ZSy389IQ23r3P6dBryuFts64IFj8Dzga/RhWio0jhLoLS499spaTOQde4MO6a0lvvOKKDWBuK6LX0LgBK+1xNVeYZUCWzzwohhAhcyeEGXpli59LZzfy408O/Vzq5f6xN71j6i0o/+M+OuxmWPAc5c6HHyZDYp+NyCaETmZxOBJ1FORV88HMRAE9eOJgwi5yfCgWK10nvxbdgctXTkDCUnSPu0zuSEEIIcVhGpJh46kSte/irG1x8uNWlc6IA1/0kyDwBVJ825t3VrHciIdqdFO4iqDQ43Nz72QYArhmXyXHd43VOJDpK5i//IqJqI25LDDkTZqAaLXpHEkIIIQ7bOT3N3D7CCsADSxwsK/bonCiAKQqMnQ7hidBYBitf0juREO1OCncRVB77ZltrF/m/nirdpkJFwo4vSMl5F4Dc45/FFX6I7nVCCCFEgLpthIWze5rw+OCm75vJrpb1yg/KEgEn3A2KAXYsgB0L9U4kRLuSwl0EjSXbK/ng50IA/n2BdJEPFeFVm+ixQusWv2vQLdSmn6hvICGEEOIoKYrCkxPtDE82Uu+CK+Y0k18nxftBJQ+AwdO02yv+Cw1l+uYRoh1J4S6CQl2zm79+uh6Aq8d2Y2wP6SIfCkwtlfRZeCMGr5Oa9EkUDb5N70hCCCHEMbGZFN44NYy+cQYqmlWumN1McYNP71iBa/AlkNgP3M2w6EnwuvVOJES7kMJddHqqqnL/FxspqXPQLT6Mv57aV+9IogMoPje9F9+KtbmUlshMth//HBiMescSQgghjlmMTeGdM8LoHm2guFHlijnNlDdL8X5ABiNMuFvrOl+ZA6te1zuREO1CCnfR6X22ppg5G0oxGRT+c8kwwq3SRT4UdPvlMaJ3r8RjjiB70qt4LVF6RxJCCCH8JjHMwLtnhpEeoZBf5+PKOc3UOKR4P6CIZDhBWw6W7Dky3l0EJSncRae2s6qJh77cBMAdp/RmaEaMvoFEh0jM/YTU7LcAyB3/DC3RPXVOJIQQQvhfWoSB984MJzFMIbvaxyVfN1PeJMX7AXUZBYMu1m4vnwG1hfrmEcLPpHAXnZbb6+O2D9fR5PIyOiuOmyb20DuS6ABRu1fSfeUDABQN/jM1GafonEgIIYRoP5nRBt4/M4ykPcX7RV81USRj3g9s6OWQOgQ8Tlj4mDbuXYggIYW76LT+b9521hXVEmUz8dy0oRgNit6RRDuz1efTe+HNGHxuKrudzq7Bf9Y7khBCCNHuesUa+fSccDIiFXbWq1z8ZRO5NTLb/H4MRjjhLxAWD3W7YNkMUFW9UwnhF1K4i05paW4lLy7IBeCx8weRHmPXOZFobyZnLX0X/BGzq5aG+CHkjXtaW7tVCCGECAFdowx8ck44PWMMlDapTPuqmU0VUrzvxx4DE+8FxQgFi2Hjx3onEsIvZBYv0emU1rXw5w/W4lPh4pFdOHNwmt6RRDtTvC56//Qn7PX5OMPTyJ70Kj6TTe9YQgghxAHl1vqvK3u4BbKitVVTUsINfHx2GFd/08zGSh8Xf9XEf062c0qm2W/vFxSS+sGYm2DFi7D2HYjOgIgkvVMJcUykcBedisvjY/p7a6hqctE/NYpHzhmodyTR3lSV7isfIHr3CrymcLZNeg23PVHvVEIIIcR+bHuOrG+f3+LX7S64JLy1eI+zaxPW/emHZpYUe7nhuxbuO87H9YMtKIoMG2zV5zRtgrptX8OSZ2DsrXonEuKYSOEuOpXHvtnKmsJaIm0mXr5iBDazrNsd7DLWPUtS3qeoioGcCf9Hc2w/vSMJIYQQB5QaYeDZSTYcHv9sr7jRx4trXTS59n08yqrwv9PC+McyB+9tcfPYCid5tT7+ebwNi1GK91aj/qiNdS9dK+u7i05PCnfRaXy9voQ3lxUA8OzFQ+kaH6ZvINHuUra9RZdNLwKwY8yj1KZP0jmREEIIcWipER0z/4rZqPDo8TZ6xhj453InH21zk1/n44WT7SSFyxwwgDZZ3cR74Ju7oL5Ye8zj1DeTEEdJ/qpFp5Czu4F7PtsAwJ9O7MEp/ZN1TiTaW3z+12
SuegSAwiF3Ut7rEp0TCSGEEIFFURSuHWRl5ql2Iszwc6mX0z9rYlmxn5r8g4E1Ak76O5j3TGQ8/5/gk0n9ROcjhbsIeFWNTv7w5iqaXV7Gdo/nzlN66x1JtLPoksX0XHY3Ciqlfa6ieNB0vSMJIYQQAWtSVzNfnh9OnzgDlS0qV8xp5oU1TnyyFJomOh1GXqfdLlgMc+6SZeJEpyNd5UVAc7i93PDOanbVtNAtPowXLx+OySjnm4JZZPkv9Plp71rtZ1Iw6u8gk+0IIYQIYYc7S/2/jrfx8joXPxZ6eHqVkwWFHm4fYSHWtu+x029nqg8Z8T333FBg9f8gMgVOvFfXSEIcCSncRcBSVZV7P9vA6p01RNlMzLx6FHHhFr1jiXYUUb6afvOuxehppjb1eHLHPyVrtQshhAhZxzpL/erdXq785sCv/e1M9SHl+NthyXOw8HFtibiRf9A7kRCHRQp3EbBemJ/LF+tKMBoUXrpiBD2TIvSOJNpRRMUa+s2/FqOnibqUsWSf+Aqq0ap3LCGEEEI3xzJLfVmTj0+y3ZQ2aV3ChyUZOLOHmWqHesCZ6kNG/3PBYIZFT2pd5u2xMOA8vVMJ0SYp3EVA+mp9Cc/8kAPAP88ZyPieCTonEu0pomIt/eZdg8ndSF3yWLZNeh2fya53LCGEEEJ3RztLfVaMgVGpRj7LdvNlroe15T521rs4q4cc/jPpfmjcDWvegk+vA8UI/c/WO5UQhyR/uSLgLMgu586P1gFw3fFZXDamK/mVTTQ5/TNDam55o1+2I/wjonw1/eZfu6doP45tk17D11wN7qPrFrifuiL/bEcIIYToZEwGhWn9LAxLNvLSOhdlTSpvbXYDUN3iA0Kwqzxoc+ec+Zy2NNyGD+HTa+Git6DfmXonE+KgpHAXAWVVQTU3v7saj0/lrCFp3H96P/Irm5j09EK/v5fNLGOn9RZbNI/ei2/B4HVSnzRaa2lvqYFZN/j/zUzS7V4IIURo6h1n5PEJNj7LcfNNngcfcNMPLdx3nMrl/c0YQnESWIMRzv0vqF7Y+Al8cjVc/A70PV3vZEIckBTuImBsLqnjD2+uwuH2MalPIs9ePASjQWltaZ8+qSfpMf7pPm0zG0iNlq7YekrM/YQeK+5HUb3UpE8iZ8ILWvf4+hLtCYMvhvAk/7yZyQrhMtxCCCFE6LKZFC7vbyEzysALa100e+DBJQ4+3ubi7+NsjEoNwbLAYIRzXwbVB5s+g4+vgovfluJdBKQQ/AsVgWhHRSNXzfyZBoeH0Zlx/PfyEZh/t+xbeoydrIRwnRIKv1FV0ja9RLd1TwNQ3uMCdhz3GKrBvO/zwpO0dVeFEEII4Tdpe8bM3zjEwntbXGys9HHRV82c2cPEvWNsdIkMsR6JRhOc96pWvG/+HD66As6eAcMu1zuZEPsIsb9MEYh2VDRy2WsrqWpyMTA9itevGYndEqJjroKc4nWR9fODrUV78YCbyBv75P5FuxBCCCHa1Vk9zCy4JIJL+5pRgNl5Hk7+qJGnfnZQ51T1jtexjCY4/3UYcpnWdf7LP8GS50ENsX8HEdCkcBe6yi5r4OJXVlBW76BnUgRvXTuaKJsUccHI3FJB/x+vICXnfVQU8kc+QOHwv2oTxAghhBCiwyWGGXh8op3ZF4QzJtWI0wsvrnUx4YMGXlrnpMUdQoWr0aSNeR9/m3b/x4fg+wfA59M3lxB7SOEudLNxVx3TXl1OZaOT/qlRfHTDccRHyARiwSiicj2DvjmHqPJf8Jgj2DbpNcr6/UHvWEIIIYQABiQY+fCsMF6ZYqdXrIE6J/x7pZMJHzby9iYXDk+IFPCKAqc8AlMe1e4vfwFmXe+/lW6EOAZSuAtdrN5ZzWWvraC22c2QjBg+uF6K9qCkqiRt/4gB303D2lxGc1QPNp72BbVdTtI7mRBCCCF+Q1EUpmaZmXthOM9MstElUqGiWeXvSx1M/KCR1zc4aQ6VFvhxt8J5r4DBBJs+hf+dBnXFeqcSIU4Kd9Hhftyymytn/kyDU5uI7t3rRhMdJt3jg43JUU3vn26mx4r7MPhcVHc5hU2nzcIR3V3vaEIIIYQ4CKNB4YLeFuZPi+Cfx9tIDVfY3azy6HInx7/fyItrnKExBn7IJXDFLLDHQslaePVEKFyhdyoRwmRWedFhVFVl5pJ8/vXNVlQVTuiVwKtXykR0wSimeAE9lt2DxVGJz2CmaMjtlAy4ERQ5VyiEEEIEgtzatsduD0sy8t9T7Mzf6eGTHDdlTSpPrXIyY42TUzJNnNPTTEq4gXALZEUH4fFc94lww0L48HLYvQnePBNOfwpGXCNz9IgOJ4W76BAer4+HvtrMeysLAbh0dFceOWfAfku+ic7N6Kqn69onScl5H4Dm6F5sP/5ZmuMG6JxMCCGEEAC2PUf/t88/+nHbDi98nefh6zxP62Pzp4XRPSYIS4vYTPjDd9pM81u+hNm3Q8FiOONZsMfoHE6EkiD86xKBpq7Zza0frmVRTgWKAn87vR/XHZ+FImcqg4fqI3HH53Rd8wQWRxUAJX2vpWjYX/CZbDqHE0IIIcReqREGnp1kw+Fp+7kHoqoqubU+lhR72V7za6v9td+28MfBVs7rZSbCEmTHeNYIuOgtWPIczH8UNn0GRT9r4+Azx+udToQIKdxFu1pXVMst769hV00LdrOR5y8ZytQBKXrHEn4UXrWJrFX/ILJiDQAtUd3ZMfoR6lPH6ZxMCCGEEAeSGnFsPR67xxqZkmWmqN7Hx9lufinzsrNe5cElDv690sE5vcxc0NvMsCRj8DTUKAqccCdkTYDP/gg1+fDWmXD8nTDxHjBZ9E4ogpz0UxbtQlVVXl+8g4teXsaumha6xoXxyU1jpWgPIra6PHouuYNB35xDZMUavKYwdg6/l/VnfiNFuxBCCBECMqIMnN9Lm2D4+sEWukcbaHTDe1vcnP9FMyd/3MSLa5yUNAbRWuhdRsJNi2HoFaD6YPHT8MoJsHOZ3slEkJMWd+F3tc0u7v5kAz9u3Q3A6YNSeOKCwUTZZOb4YGCr20GXjTNIKPgaRdW+iCszz2TniPtxhcmJGSGEECIUndPTzP3HWVle4uWTbDdz893sqPXx1ConT69yMi7dyAW9zZyaZSbM3Mlb4a2RcO6L0GsyzLkbKrZpS8YNvUJbBz48Xu+EIghJ4S786tuNpTz45WYqG51YjAYeOLMfVx7XLXi6SYUqVSWqbBkpOe8SV/RDa8Fe3eUUiob8WSafE0IIIQSKojAu3cS4dBP/dNn4ZoebWdvdrCjxsrRYuzyw2MFp3c2c2UN7ntXYiY8RB5wHWRNh3sOw+k1Y9y5kfwMn/Q2GXw1GabQS/iOFu/CL8noHf/9yM3M3lwHQIzGc56cNY1CXaJ2TiWNhdNWTmDeLlJx3sdfvaH1cCnYhhBBCHEqEReHivhYu7muhqMHH5zluPstxsbNe5bMcN5/luIm0wEldTZyaZWZihqlztsSHxcFZ/4Ehl8HsO6B8M8y5C5a9ACc9AAPOB4OMThbHTgp3cUy8PpWPVhXxxLdbqXd4MBkU/
nRiD6af1BOrKQjX8wwBBncjcUU/Er9zDjElizH4XAB4zBFUdj+Pst6X0xLTW+eUQgghhOgsMiIN/HmElVuHW1i928uX2918V+ChvFnly1wPX+Z6sJlgYhcTp2aZOKmbmWhrJyviu46BG3+CX/4Hi57UJq/77DpY+jxMegB6TZECXhwTKdzFUfspp4LH5mwle3cDAIPSo3nywsH0S43SOZk4UtbGXUSXLCK2ZBExJT9h8Dpbf9YU04fdvS+novu5+MwR+7+4vhjcR78W7H7qivy3LSGEEEJ0iNzaw5uAzmZUmNbXwkV9zGRX+1he4mVpsYfdzSrfFXj4rsCDSXEwMNHAiGQTw5ONdI1Ujn3YZZ2JcF8KWZU5x7adtmSMhovfho2fwvoPoGwjfDANYrrB4GnQczKYrAd/vTUS4nu0b0bRKUnhLo7Y1tJ6HvtmK4u3VwIQbTdz28m9uGpsN0xGOZPYGViaSoisWEtk+SpiShdjr8/f5+ctUVlUdTuTyswzDt26Xl8Ms25on5CH+lITQgghRECw7akmbp/vv5P4HhXWlftYV+5i5kZ/bTUOeJYFn95JlqHMXxs9fLU7tZb4RU+2/dxb10jxLvYjhbs4bGsKa/jvgrzW2eLNRoWrx2Zyy0k9iQmTtSsDkqpiaS4hrCabsNpswqu3EFmxBmtz6b5PU4w0JA6jNnUCNV1Oojm2n7ZeaVv2trQPvhjCk/yX22SF8AT/bU8IIYQQ7SI1wsCzk2w4PP7bZkWzj5wa7ZJf58Pzm8Z8A5ARpdAr1kj3aANdIhVMhraPWYobfby41kXTCQ9ArB/DHg63A4qWw46fwFG750EFEvtoLfTJg7SJ7OqKYPEz4Gzo2HyiU5DCXRySz6eyOLeS/y7IZWV+NaDVc6cPSuWvU/vQLT58v9fkVzbR5PTfB2JueaPfttWu/N1lHMBsh6j0w3qq0VlHWF3OniJ9257rHEzu/T/8VcVAU2QPGmL7Ux8/hLr4YXjNe/4vVaA67/Dy7e3WHp4E0YeXUwghhBDBJTXCvz0us2IMjE7Tbru8KlurfKwv97K+wktJo8rOepWd9dqxptkAvWIN9Is30j/eQM9YA5ZDzVQfnQHxOszDlDIQRlwLBUsgZy7s3qQtI1exDSwRkHEcxGZ2fC7RaUjhLg6ovN7BJ6t38fEvReysaga0FvbzhqVzw4Qe9Ew6wFhntKJ90tML2yWTzRzA3fDbs8v4+a+2Fu9GVz22hgJs9QXY91zbGgqwNezE7Kw54Mt9iglHVHeaIzJoKtpAo2qnETu+GgPU5MCOHOCTY8so3dqFEEII0Q4sRoUhSUaGJGnFdkWzjw0VPjZWeNla5aXeBVuqfGyp8vEZYDJAzxgD/eIN9Ikz0iPGQIQlQCa6M5ig+4napb4U8n6E3HnQXKnd3uvHh2DQxdB9IkR30SutCDBSuItWzS4PC7Mr+GJtMfO2leP1qQBEWE1cPDKDP56QRVqM/ZDb2NvSPn1ST9LbeO6RsJkNpEb7b3t+58cu4wavA7ujHFtdLrbCRdh+eRi7qxpbfQFmZ/UhX+sMS6U5tg/NMXsusX1oieqBarRAVS4U3ibd2oUQQgjRaSWGGTi5m4GTu5lQVZWSRpUtVV62VvnYWuWj1qmyrdrHtmofoB2XpoYrpIRrxfv2Gi+949pole8IUakw7EptGbnyzVC4HPIXa13pdyzULgDxPbVCP/N46DJKCvkQJoV7iKtrcbMwu5xvN5axMKcch/vXQUQjusUybVQGZw5OJcxyZLtKeoydrIT9u9EHvcPsMm7wOrE1l2JrLsHWXIK99XYpFlftb54IFP+4z2td1jgcYWk4wtNp2XPtCE/HEZaGz/S7kxs+oLZQuy3d2oUQQggRRBRFIT1SIT3SwCmZoKoqZU3qniLeS26tj7ImldI9F4A7Fji4Z5GDgQlGhiYZGZBgoH+81jKvSzFvMELKYO3S/SSYcwcMuwoqtkLxaq3hpSoXVr2uPT8yDbqMhPQRWvf75EEQmXzo96jK8++4eZn5XhdSuIcYh9vLmsIaluZWsiS3io27atnTsA5A17gwThuYwgUjutA7OVK/oEFA8bqwtZTtV5jbmkuxOqsO+Vq3OZoWWwKOukocWHBgoUW14MCMr9kIzV6oLAQKjzyYdGsXQgghRBBSFIXUCIXUCAMnddPKnAaXSl6Nj1VlHuYXeokwQ6Mb1uz2sma3t/W1vx8r3z/BSP94Y8euJ793YuBR10HaUGiphZ1Ltdb3opVQtgkaSmDrV9plr/BESOqvFdNx3SE2a891JjSUwozh/s8qM993OCncg5jXp7Kzqon1u2pZV1jLuqJatpTW4/aq+zyvR2I4pw1M5dSBKQxIizr2dTJDiMHjwNpYiG33CmxKFbb8D7B567E3l2JxVKKgHvS1HlMELWGpWut5WNpvbqfg3bteelMleJwH3cYRk27tQgghhAghkRaFoclaAT6/0MsHZ4YRblH2THbn29PN3kvD78bK75USrtAjxkD3aAM9YrWW+R4xBlLD/bC2fFvsMdD3DO0C4GqCknWw62ftevdmrTW+qQLyf9Iuvxe257gveQBEdQFbFNiiwRq15xKptfofLpn5XjdSuAeBRqeHwqpmCqubKahqIqesgZzyBrbvbsT52/Uz9kiMtHJ8zwTG90xgfM/4wB47HgAM7mZsjTuxNezcMxncztbLPsuqGYCK8n1e6zGF4bDvLchTf1Ocp+KxRLX95lJkCyGEEEL4jaIodI8x0j3GyHm9tcdUVWVXo8qWSm2s/JYqL1uqvOxq0LrelzV5WVrsBdyt2wkzQfcYA91jDHSNNNDlN5fUCAVre3S7t4RD5njtsperWetWX74VqndAdb52XZMPjjpt4jvQivzdmw/0L6IV8mFxYI/9tZjf5/Kbx8Li/f97icMSNIX7iy++yFNPPUVZWRlDhgxhxowZjB49Wu9YflPX4ubHLbvZUlpPWZ2DqkYnlU0uKhqc1LW4D/o6i9FA98Rw+qRE0js5kj7JkSRHWVvPEFY1unC4faE5Hh1QfG7MzeVYm0uwNJVibS7F2qTdtuy5fbDZ2vfymCNxhKXiqCnD0WUcjtjerUW6xxx9eOuhCyGEEEKIdpdbu3+j1l7pEQbSIwxM3tPNvtGlsqvRx64GH7saVO260Udpo0qzBzZV+thUuf/2FCA5XNEK+QgDKREKiXaFxDADSWEKiWEKiXYDkRaOvdXeEqaNd08fsf/Pmqsh5zv44ibofRqgao+1VGvXjlpQfdp16/ryh+mNqVrBb4nYt8i3RIB1z2OWSO222Q4mO5htYNpzMdt/c9sGDbvB69J6hxr8UKIG4Tj8oCjcP/roI+68805efvllxowZw/PPP8/UqVPJzs4mKcmPs2frqKbJxV2frD/i17m8PraVNbCt7NDdWRbcfWLnLt59XoyeJozuRoyuBozuRkzuRoyueszOasyOakyOyj23qzA7tGuTq+6wNu+2xuKI7PabS6Z2HZWJxxKjrXv+9W3Q5SyZ+E0IIYQQIsDY9lQ9t89v8et2bxxiptkNuxr3FPYN
Plo8tLbU/4L3oK+1mSDRrpBgNxBrU4gmimj3VUSvqCc6KZ9ou5mYMDPRdjMRNhPhFhN2i5Fwiwmb2dB20R8WB0n9tNu9p2oz1P+WzwvO+l+L+ZYarQv8Ppd6cDWCo16779vTYOhxQKMD2H30/3jt7bxXYMgleqfwm6Ao3J999lmuv/56rr32WgBefvll5syZwxtvvMG9996rczr/SLH7GJngoaSyhoFJFlLDIc7sJc7qJcnqJcykne070JhqRf3tY+o+1zVNLn7YUoZpcwXEhoGq/u55HNFjMbUtXGzMoXvRdhJrLPu8r6J6UXxeUL17bnu069bHPSg+L4rqQVF9KD43Bq+z9aJ4nRi8jt885sDobtaKdU/TEf6L/spnsOAKS8EZnoorLBVneBqu8FScYanadXg63sPp1i6EEEIIIQJSaoSBZyfZcHj8s73iRh8vrnVxVg8LAxN/HSOuqirVDrW1hb6owcfuJpWKFh/lzSqVzdrtBhc4PFDUoFLUsLe4twGnws8NwJZDvr+iQJjZiN1iIsxiJMxixGIyYDYaMBsVLCYjFqOC2VWP2TUd86pILGEtWAxgNiqYDdqEfIpix6h0waB0QVHAoIDRBAYzKBF77itK688MDWUYNn2MYcwNGMLjUDxOFI8DPC1aMe92oHid2lLJHge4W1B8bq013esBnxM8bvC5wOtG8bq0n/m0/5jf1zL736eNn/96v3/BDroNafO/stNQVFU9+OxZnYDL5SIsLIxPP/2Uc889t/Xxq6++mtraWr788sv9XuN0OnE6f53wq66ujq5du1JUVERUVIAWaLW74KXj9E4R8LwYcRts2sVow2Ow4zSG4zRF4DBG4jRG4DRF7LmOxGmIwGm0g2I4tjduqoCCRZA5Ucb+CCGEEEIEuZ1NRt7KC+eqHs10jzzyswFuLzR4FRrdBprcCi1eBUdLM81Vu3BEZtJiCMPhVWjxKDh8Ck4vuHwKbp8MwTxc9w9p4bLzz9U7xiHV19eTkZFBbW0t0dHRh3xup29xr6ysxOv1kpy87/qFycnJbNu27YCvefzxx3n44Yf3ezwjI6NdMopQ8o3eAYQQQgghRAf5l94BxEHdDNx8rd4pDk9DQ0PwF+5H47777uPOO+9sve/z+aiuriY+Pl6WQtPJ3rNNAd3rQYQM2R9FoJF9UgQS2R9FoJF9UgSSI9kfVVWloaGBtLS0Nrfb6Qv3hIQEjEYju3fvOzHC7t27SUlJOeBrrFYrVqt1n8diYmLaK6I4AlFRUfKBKwKG7I8i0Mg+KQKJ7I8i0Mg+KQLJ4e6PbbW073WMA3v1Z7FYGDFiBPPmzWt9zOfzMW/ePMaOHatjMiGEEEIIIYQQ4th1+hZ3gDvvvJOrr76akSNHMnr0aJ5//nmamppaZ5kXQgghhBBCCCE6q6Ao3KdNm0ZFRQV///vfKSsrY+jQocydO3e/CetE4LJarTz00EP7DWEQQg+yP4pAI/ukCCSyP4pAI/ukCCTttT92+uXghBBCCCGEEEKIYNbpx7gLIYQQQgghhBDBTAp3IYQQQgghhBAigEnhLoQQQgghhBBCBDAp3IUQQgghhBBCiAAmhbvoMC+++CKZmZnYbDbGjBnDzz//fNDnbt68mQsuuIDMzEwUReH555/vuKAiJBzJ/vjaa69xwgknEBsbS2xsLJMnTz7k84U4GkeyT86aNYuRI0cSExNDeHg4Q4cO5Z133unAtCLYHcn++FsffvghiqJw7rnntm9AEXKOZJ988803URRln4vNZuvAtCLYHelnZG1tLdOnTyc1NRWr1Urv3r355ptvjug9pXAXHeKjjz7izjvv5KGHHmLNmjUMGTKEqVOnUl5efsDnNzc30717d5544glSUlI6OK0Idke6Py5cuJBLL72UBQsWsHz5cjIyMpgyZQrFxcUdnFwEqyPdJ+Pi4vjb3/7G8uXL2bBhA9deey3XXnst3333XQcnF8HoSPfHvQoKCrj77rs54YQTOiipCBVHs09GRUVRWlraetm5c2cHJhbB7Ej3R5fLxSmnnEJBQQGffvop2dnZvPbaa6Snpx/ZG6tCdIDRo0er06dPb73v9XrVtLQ09fHHH2/ztd26dVOfe+65dkwnQs2x7I+qqqoej0eNjIxU33rrrfaKKELMse6Tqqqqw4YNUx944IH2iCdCzNHsjx6PRx03bpz6+uuvq1dffbV6zjnndEBSESqOdJ/83//+p0ZHR3dQOhFqjnR/fOmll9Tu3burLpfrmN5XWtxFu3O5XKxevZrJkye3PmYwGJg8eTLLly/XMZkIRf7YH5ubm3G73cTFxbVXTBFCjnWfVFWVefPmkZ2dzYQJE9ozqggBR7s/PvLIIyQlJXHdddd1REwRQo52n2xsbKRbt25kZGRwzjnnsHnz5o6IK4Lc0eyPX331FWPHjmX69OkkJyczcOBAHnvsMbxe7xG9txTuot1VVlbi9XpJTk7e5/Hk5GTKysp0SiVClT/2x3vuuYe0tLR9PrSFOFpHu0/W1dURERGBxWLhjDPOYMaMGZxyyintHVcEuaPZH5csWcLMmTN57bXXOiKiCDFHs0/26dOHN954gy+//JJ3330Xn8/HuHHj2LVrV0dEFkHsaPbHHTt28Omnn+L1evnmm2948MEHeeaZZ3j00UeP6L1NR51aCCFC0BNPPMGHH37IwoULZaIboavIyEjWrVtHY2Mj8+bN484776R79+6ceOKJekcTIaShoYErr7yS1157jYSEBL3jCAHA2LFjGTt2bOv9cePG0a9fP1555RX++c9/6phMhCKfz0dSUhKvvvoqRqORESNGUFxczFNPPcVDDz102NuRwl20u4SEBIxGI7t3797n8d27d8vEc6LDHcv++PTTT/PEE0/w448/Mnjw4PaMKULI0e6TBoOBnj17AjB06FC2bt3K448/LoW7OCZHuj/m5eVRUFDAWWed1fqYz+cDwGQykZ2dTY8ePdo3tAhq/jiONJvNDBs2jNzc3PaIKELI0eyPqampmM1mjEZj62P9+vWjrKwMl8uFxWI5rPeWrvKi3VksFkaMGMG8efNaH/P5fMybN2+fs6FCdISj3R+ffPJJ/vnPfzJ37lxGjhzZEVFFiPDXZ6TP58PpdLZHRBFCjnR/7Nu3Lxs3bmTdunWtl7PPPptJkyaxbt06MjIyOjK+CEL++Iz0er1s3LiR1NTU9oopQsTR7I/jx48nNze39aQmQE5ODqmpqYddtAMyq7zoGB9++KFqtVrVN998U92yZYt6ww03qDExMWpZWZmqqqp65ZVXqvfee2/r851Op7p27Vp17dq1ampqqnr33Xera9euVbdv367XryCCyJHuj0888YRqsVjUTz/9VC0tLW29NDQ06PUriCBzpPvkY489pn7//fdqXl6eumXLFvXpp59WTSaT+tprr+n1K4ggcqT74+/JrPLC3450n3z44YfV7777Ts3Ly1NXr16tXnLJJarNZlM3b96s168ggsiR7o+FhYVqZGSkesstt6jZ2dnq7Nmz1aSkJPXRRx89oveVrvKiQ0ybNo2Kigr
+/ve/U1ZWxtChQ5k7d27rxA6FhYUYDL92ACkpKWHYsGGt959++mmefvppJk6cyMKFCzs6vggyR7o/vvTSS7hcLi688MJ9tvPQQw/xj3/8oyOjiyB1pPtkU1MTf/rTn9i1axd2u52+ffvy7rvvMm3aNL1+BRFEjnR/FKK9Hek+WVNTw/XXX09ZWRmxsbGMGDGCZcuW0b9/f71+BRFEjnR/zMjI4LvvvuOOO+5g8ODBpKenc9ttt3HPPfcc0fsqqqqqfv1NhBBCCCGEEEII4TdyulQIIYQQQgghhAhgUrgLIYQQQgghhBABTAp3IYQQQgghhBAigEnhLoQQQgghhBBCBDAp3IUQQgghhBBCiAAmhbsQQgghhBBCCBHApHAXQgghhBBCCCECmBTuQgghhBBCCCFEAJPCXQghhBAB78QTT+T222/XO4YQQgihCynchRBCCB0sX74co9HIGWecoXeUDjN79mwmTpxIZGQkYWFhjBo1ijfffHOf5yxcuBBFUaitrdUloxBCCBGIpHAXQgghdDBz5kxuvfVWFi1aRElJSbu+l6qqeDyedn2PtsyYMYNzzjmH8ePHs3LlSjZs2MAll1zCTTfdxN13361LJpfLpcv7CiGEEEdKCnchhBCigzU2NvLRRx9x8803c8YZZ+zT6nzZZZcxbdq0fZ7vdrtJSEjg7bffBsDn8/H444+TlZWF3W5nyJAhfPrpp63P39tq/e233zJixAisVitLliwhLy+Pc845h+TkZCIiIhg1ahQ//vjjPu9VWlrKGWecgd1uJysri/fff5/MzEyef/751ufU1tbyxz/+kcTERKKiojjppJNYv379QX/foqIi7rrrLm6//XYee+wx+vfvT8+ePbnrrrt46qmneOaZZ1i5ciUFBQVMmjQJgNjYWBRF4Zprrmndjs/n469//StxcXGkpKTwj3/8Y5/3aSvXP/7xD4YOHcrrr79OVlYWNpsNgE8//ZRBgwZht9uJj49n8uTJNDU1Hfw/UAghhOhgUrgLIYQQHezjjz+mb9++9OnThyuuuII33ngDVVUBuPzyy/n6669pbGxsff53331Hc3Mz5513HgCPP/44b7/9Ni+//DKbN2/mjjvu4IorruCnn37a533uvfdennjiCbZu3crgwYNpbGzk9NNPZ968eaxdu5ZTTz2Vs846i8LCwtbXXHXVVZSUlLBw4UI+++wzXn31VcrLy/fZ7kUXXUR5eTnffvstq1evZvjw4Zx88slUV1cf8Pf99NNPcbvdB2xZv/HGG4mIiOCDDz4gIyODzz77DIDs7GxKS0v5z3/+0/rct956i/DwcFauXMmTTz7JI488wg8//HBEuXJzc/nss8+YNWsW69ato7S0lEsvvZQ//OEPbN26lYULF3L++ee3/n8IIYQQAUEVQgghRIcaN26c+vzzz6uqqqput1tNSEhQFyxYsM/9t99+u/X5l156qTpt2jRVVVXV4XCoYWFh6rJly/bZ5nXXXadeeumlqqqq6oIFC1RA/eKLL9rMMmDAAHXGjBmqqqrq1q1bVUBdtWpV68+3b9+uAupzzz2nqqqqLl68WI2KilIdDsc+2+nRo4f6yiuvHPA9brrpJjU6OvqgGQYPHqyedtpp+2SvqanZ5zkTJ05Ujz/++H0eGzVqlHrPPfccdq6HHnpINZvNanl5eevPV69erQJqQUHBQfMJIYQQejPpetZACCGECDHZ2dn8/PPPfP755wCYTCamTZvGzJkzOfHEEzGZTFx88cW89957XHnllTQ1NfHll1/y4YcfAlqLcXNzM6eccso+23W5XAwbNmyfx0aOHLnP/cbGRv7xj38wZ84cSktL8Xg8tLS0tLa4Z2dnYzKZGD58eOtrevbsSWxsbOv99evX09jYSHx8/D7bbmlpIS8v7xj/dQ5t8ODB+9xPTU1t7Q1wuLm6detGYmJi6/0hQ4Zw8sknM2jQIKZOncqUKVO48MIL9/mdhRBCCL1J4S6EEEJ0oJkzZ+LxeEhLS2t9TFVVrFYrL7zwAtHR0Vx++eVMnDiR8vJyfvjhB+x2O6eeeipAaxf6OXPmkJ6evs+2rVbrPvfDw8P3uX/33Xfzww8/8PTTT9OzZ0/sdjsXXnjhEU3S1tjYSGpqKgsXLtzvZzExMQd8Te/evamrq6OkpGSf3xu0Ew55eXmtY9sPxWw273NfURR8Pt8R5fr9v4nRaOSHH35g2bJlfP/998yYMYO//e1vrFy5kqysrDYzCSGEEB1BxrgLIYQQHcTj8fD222/zzDPPsG7dutbL+vXrSUtL44MPPgBg3LhxZGRk8NFHH/Hee+9x0UUXtRat/fv3x2q1UlhYSM+ePfe5ZGRkHPL9ly5dyjXXXMN5553HoEGDSElJoaCgoPXnffr0wePxsHbt2tbHcnNzqampab0/fPhwysrKMJlM+71/QkLCAd/3ggsuwGw288wzz+z3s5dffpmmpiYuvfRSACwWCwBer/cw/kV/dTS59lIUhfHjx/Pwww+zdu1aLBZLa48IIYQQIhBIi7sQQgjRQWbPnk1NTQ3XXXcd0dHR+/zsggsuYObMmdx0002ANrv8yy+/TE5ODgsWLGh9XmRkJHfffTd33HEHPp+P448/nrq6OpYuXUpUVBRXX331Qd+/V69ezJo1i7POOgtFUXjwwQdbW6wB+vbty+TJk7nhhht46aWXMJvN3HXXXdjtdhRFAWDy5MmMHTuWc889lyeffJLevXtTUlLCnDlzOO+88/brng/QtWtXnnzySe666y5sNhtXXnklZrOZL7/8kvvvv5+77rqLMWPGAFpXdkVRmD17Nqeffjp2u52IiIg2/22PJhfAypUrmTdvHlOmTCEpKYmVK1dSUVFBv3792nxPIYQQoqNIi7sQQgjRQWbOnMnkyZP3K9pBK9x/+eUXNmzYAGizy2/ZsoX09HTGjx+/z3P/+c9/8uCDD/L444/Tr18/Tj31VObMmdNm1+5nn32W2NhYxo0bx1lnncXUqVP3Gc8O8Pbbb5OcnMyECRM477zzuP7664mMjGxdOk1RFL755hsmTJjAtddeS+/evbnkkkvYuXMnycnJB33v22+/nc8//5zFixczcuRIBg4cyPvvv89LL73E008/3fq89PR0Hn74Ye69916Sk5O55ZZbDv2PusfR5oqKimLRokWcfvrp9O7dmwceeIBnnnmG00477bDeVwghhOgIiqrKeidCCCGEOLBdu3aRkZHBjz/+yMknn6x3HCGEECIkSeEuhBBCiFbz58+nsbGRQYMGUVpayl//+leKi4vJycnZb3I4IYQQQnQMGeMuhBBCiFZut5v777+fHTt2EBkZybhx43jvvfekaBdCCCF0JC3uQgghhBBCCCFEAJPJ6YQQQgghhBBCiAAmhbsQQgghhBBCCBHApHAXQgghhBBCCCECmBTuQgghhBBCCCFEAJPCXQghhBBCCCGECGBSuAshhBBCCCGEEAFMCnchhBBCCCGEECKASeEuhBBCCCGEEEIEsP8Hfzi6yIOSPd0AAAAASUVORK5CYII=", + 
"text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA+0AAAK9CAYAAABRvo1QAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8g+/7EAAAACXBIWXMAAA9hAAAPYQGoP6dpAACN50lEQVR4nOzdd3RUdf7/8dednh5CCAkQIBQRC4KKig2CKGJZXXUta1/L/hRdUdevq669rWvXxdUtirq6rrq76NrpKnYUEESQ3gmQXibT7u+Pm5lkSAJJmGQmyfNxzpy5c++dez+TieWV96cYpmmaAgAAAAAACccW7wYAAAAAAICmEdoBAAAAAEhQhHYAAAAAABIUoR0AAAAAgARFaAcAAAAAIEER2gEAAAAASFCEdgAAAAAAEhShHQAAAACABEVoBwAAAAAgQRHaAQBd2l133SXDMDrkXuPGjdO4ceMir+fOnSvDMPTmm292yP0vueQSDRw4sEPu1VaVlZW6/PLLlZubK8MwNGXKlHg3qd3E+vvY9fcLANA9ENoBAJ3GtGnTZBhG5OHxeNSnTx9NnDhRTz31lCoqKmJyn82bN+uuu+7SwoULY3K9WErktrXEAw88oGnTpumqq67Syy+/rAsvvLDROeE/tOzpsbsAu3btWl166aUaPHiwPB6PcnNzdeyxx+rOO++MOu+ZZ57RtGnT2vx5Yv19/PDDD7rrrru0du3amFwPAND5GaZpmvFuBAAALTFt2jRdeumluueee1RQUCC/36+tW7dq7ty5mjFjhvr376+3335bI0aMiLwnEAgoEAjI4/G0+D7ffPONRo8erRdeeEGXXHJJi9/n8/kkSS6XS5JVaS8sLNQbb7yhs846q8XXaWvb/H6/QqGQ3G53TO7VHo444gg5HA59+umnzZ6zePFiLV68OPK6srJSV111lX7+85/rjDPOiOzv3bu3jj/++EbvX7lypUaPHq2kpCT96le/0sCBA7VlyxZ9++23ev/99+X1eiPnHnDAAcrOztbcuXPb9Hli/X28+eab+sUvfqE5c+Y0+qPErr9fAIDuwRHvBgAA0FqTJk3SoYceGnl9yy23aPbs2TrllFP0s5/9TMuWLVNSUpIkyeFwyOFo3//cVVdXKzk5Oe5hyul0xvX+LVFUVKT99ttvt+eMGDEi6g8vO3bs0FVXXaURI0boggsu2OM9Hn/8cVVWVmrhwoUaMGBAo/t3lFh/H/H+/QIAxAfd4wEAXcL48eN1++23a926dfrHP/4R2d/UmPYZM2bo6KOPVmZmplJTUzVs2DDdeuutkqzq+OjRoyVJl156aaQrdrgL9bhx43TAAQdowYIFOvbYY5WcnBx5b3NjjoPBoG699Vbl5uYqJSVFP/vZz7Rhw4aocwYOHNhkVb/hNffUtqbGUFdVVenGG29Ufn6+3G63hg0bpkceeUS7drQzDEPXXHONpk+frgMOOEBut1v777+/Pvjgg6Z/4LsoKirSZZddpt69e8vj8eiggw7Siy++GDkeHt+/Zs0avfvuu5G2t0c38FWrVqlfv36NArsk5eTkRLYHDhyopUuXat68eY263BcXF+u3v/2tDjzwQKWmpio9PV2TJk3SokWLoj5Ta7+P1157TYcccojS0tKUnp6uAw88UE8++aQkqyfJL37xC0lSYWFh5HrhXgBN/X55vV7ddddd2meffeTxeJSXl6czzjhDq1atauuPDwCQYKi0AwC6jAsvvFC33nqrPvroI11xxRVNnrN06VKdcsopGjFihO655x653W6tXLlS8+fPlyQNHz5c99xzj+644w5deeWVOuaYYyRJRx55ZOQaO3fu1KRJk3TuuefqggsuUO/evXfbrvvvv1+GYejmm29WUVGRnnjiCU2YMEELFy6M9AhoiZa0rSHTNPWzn/1Mc+bM0WWXXaaRI0fqww8/1E033aRNmzbp8ccfjzr/008/1X/+8x9dffXVSktL01NPPaUzzzxT69evV8+ePZttV01NjcaNG6eVK1fqmmuuUUFBgd544w1dcsklKi0t1XXXXafhw4fr5Zdf1vXXX69+/frpxhtvlCT16tWrxZ+/pQYMGKCZM2dq9uzZGj9+fLPnPfHEE7r22muVmpqq2267TZIi3+Xq1as1ffp0/eIXv1BBQYG2bdum5557TmPHjtUPP/ygPn36tPr7mDFjhs477zwdd9xxeuihhyRJy5Yt0/z583Xdddfp2GOP1W9+8xs99dRTuvXWWzV8+HBJijzvKhgM6pRTTtGsWbN07rnn6rrrrlNFRYVmzJihJUuWaPDgwW37AQIAEosJAEAn8cILL5iSzK+//rrZczIyMsxRo0ZFXt95551mw//cPf7446Ykc/v27c1e4+uvvzYlmS+88EKjY2PHjjUlmc8++2yTx8aOHRt5PWfOHFOS2bdvX7O8vDyy//XXXzclmU8++WRk34ABA8yLL754j9fcXdsuvvhic8CAAZHX06dPNyWZ9913X9R5Z511lmkYhrly5crIPkmmy+WK2rdo0SJTkvn00083uldDTzzxhCnJ/Mc//hHZ5/P5zDFjxpipqalRn33AgAHmySefvNvr7Wr79u2mJPPOO+9s0flLliwxk5KSTEnmyJEjzeuuu86cPn26WVVV1ejc/fffP+rnG+b1es1gMBi1b82aNabb7TbvueeeyL7WfB/XXXedmZ6ebgYCgWbb/sYbb5iSzDlz5jQ6tuvvwvPPP29KMh977LFG54ZCoWbvAQDoXOgeDwDoUlJTU3c7i3xmZqYk6a233lIoFGrTPdxuty699NIWn3/RRRcpLS0t8vqss85SXl6e3nvvvTbdv6Xee+892e12/eY3v4naf+ONN8o0Tb3//vtR+ydMmBBVnR0xYoTS09O1evXqPd4nNzdX5513XmSf0+nUb37zG1VWVmrevHkx+DQtt//++2vhwoW64IILtHbtWj355JM6/fTT1bt3b/31r39t0TXcbrdsNut/k4LBoHbu3BkZSvHtt9+2qV2ZmZmqqqrSjBkz2vT+Xf373/9Wdna2rr322kbHOmqZQwBA+yO0AwC6lMrKyqiAvKtzzjlHRx11lC6//HL17t1b5557rl5//fVWBfi+ffu2alKwoUOHRr02DENDhgxp92W91q1bpz59+jT6eYS7W69bty5qf//+/Rtdo0ePHiopKdnjfYYOHRoJuXu6T6xs3bo16lFTUxM5ts8+++jll1/Wjh07tHjxYj3wwANyOBy68sorNXPmzD1eOxQK6fHHH9fQoUPldruVnZ2tXr16afHixSorK2tTe6+++mrts88+mjRpkvr166df/epXLZ4zoCmrVq3SsGHD2n2iRQBAfBHaAQBdxsaNG1VWVqYhQ4Y0e05SUpI+/vhjzZw5UxdeeKEWL16sc845R8cff7yCwWCL7tOacegt1VxltKVtigW73d7kfjNBV4fNy8uLevzrX
/9qdI7dbteBBx6oW265Rf/9738lSa+88soer/3AAw/ohhtu0LHHHqt//OMf+vDDDzVjxgztv//+be6hkZOTo4ULF+rtt9+OzDUwadIkXXzxxW26HgCge+BPswCALuPll1+WJE2cOHG359lsNh133HE67rjj9Nhjj+mBBx7Qbbfdpjlz5mjChAkx71r8008/Rb02TVMrV66MWtasR48eKi0tbfTedevWadCgQZHXrWlbeEK2ioqKqGr7jz/+GDkeCwMGDNDixYsVCoWiqu2xvs+udu1mvv/+++/2/PAygVu2bInsa+7n+eabb6qwsFB///vfo/aXlpYqOzt7j+9vjsvl0qmnnqpTTz1VoVBIV199tZ577jndfvvtGjJkSKuuN3jwYH355Zfy+/2dYrk/AEDbUGkHAHQJs2fP1r333quCggKdf/75zZ5XXFzcaN/IkSMlSbW1tZKklJQUSWoyRLfFSy+9FDXO/s0339SWLVs0adKkyL7Bgwfriy++kM/ni+x75513Gi0N15q2nXTSSQoGg/rTn/4Utf/xxx+XYRhR998bJ510krZu3RpV6Q4EAnr66aeVmpqqsWPHxuQ+u5owYULUIy8vT5L0ySefyO/3Nzo/PIfAsGHDIvtSUlKa/Fna7fZGPQzeeOMNbdq0KWpfa76PnTt3Rr222WyRP9y05XfvzDPP1I4dOxp9v1Li9o4AALQelXYAQKfz/vvv68cff1QgENC2bds0e/ZszZgxQwMGDNDbb78tj8fT7Hvvueceffzxxzr55JM1YMAAFRUV6ZlnnlG/fv109NFHS7ICdGZmpp599lmlpaUpJSVFhx9+uAoKCtrU3qysLB199NG69NJLtW3bNj3xxBMaMmRI1LJ0l19+ud58802deOKJOvvss7Vq1Sr94x//aLRsV2vaduqpp6qwsFC33Xab1q5dq4MOOkgfffSR3nrrLU2ZMiVmS4JdeeWVeu6553TJJZdowYIFGjhwoN58803Nnz9fTzzxxG7nGGgPDz30kBYsWKAzzjgjEoq//fZbvfTSS8rKytKUKVMi5x5yyCH685//rPvuu09DhgxRTk6Oxo8fr1NOOUX33HOPLr30Uh155JH6/vvv9corr0T1epBa931cfvnlKi4u1vjx49WvXz+tW7dOTz/9tEaOHBkZ/z9y5EjZ7XY99NBDKisrk9vt1vjx46PWlw+76KKL9NJLL+mGG27QV199pWOOOUZVVVWaOXOmrr76ap122mkx/KkCAOImrnPXAwDQCuEl38IPl8tl5ubmmscff7z55JNPRi0tFrbrkm+zZs0yTzvtNLNPnz6my+Uy+/TpY5533nnmihUrot731ltvmfvtt5/pcDiilvQaO3asuf/++zfZvuaWfPvnP/9p3nLLLWZOTo6ZlJRknnzyyea6desavf/RRx81+/bta7rdbvOoo44yv/nmm0bX3F3bdl1izDRNs6Kiwrz++uvNPn36mE6n0xw6dKj58MMPN1oSTJI5efLkRm1qbim6XW3bts289NJLzezsbNPlcpkHHnhgk8ugdcSSb/PnzzcnT55sHnDAAWZGRobpdDrN/v37m5dccom5atWqqHO3bt1qnnzyyWZaWpopKfKz9nq95o033mjm5eWZSUlJ5lFHHWV+/vnne/V9vPnmm+YJJ5xg5uTkmC6Xy+zfv7/561//2tyyZUvU9f7617+agwYNMu12e9Tyb03du7q62rztttvMgoIC0+l0mrm5ueZZZ53V6HMCADovwzTpPwUAAAAAQCJiTDsAAAAAAAmK0A4AAAAAQIIitAMAAAAAkKAI7QAAAAAAJChCOwAAAAAACYrQDgAAAABAgnLEuwGJIBQKafPmzUpLS5NhGPFuDgAAAACgizNNUxUVFerTp49stubr6YR2SZs3b1Z+fn68mwEAAAAA6GY2bNigfv36NXuc0C4pLS1NkvXDSk9Pj3NrAAAAAABdXXl5ufLz8yN5tDmEdinSJT49PZ3QDgAAAADoMHsaos1EdAAAAAAAJChCOwAAAAAACYrQDgAAAABAgmJMOwAAAADsgWmaCgQCCgaD8W4KOgm73S6Hw7HXy4oT2gEAAABgN3w+n7Zs2aLq6up4NwWdTHJysvLy8uRyudp8DUI7AAAAADQjFAppzZo1stvt6tOnj1wu115XTtH1maYpn8+n7du3a82aNRo6dKhstraNTie0AwAAAEAzfD6fQqGQ8vPzlZycHO/moBNJSkqS0+nUunXr5PP55PF42nQdJqIDAAAAgD1oa5UU3Vssfm/4zQMAAAAAIEER2gEAAAAASFCEdgAAAABAq0ybNk2ZmZl7fR3DMDR9+vS9vk5XRmgHAAAAgG7okksu0emnnx7vZmAPCO0AAAAAACQoQjsAAAAAIMpjjz2mAw88UCkpKcrPz9fVV1+tysrKRudNnz5dQ4cOlcfj0cSJE7Vhw4ao42+99ZYOPvhgeTweDRo0SHfffbcCgUBHfYwugdAOAAAAAIhis9n01FNPaenSpXrxxRc1e/Zs/d///V/UOdXV1br//vv10ksvaf78+SotLdW5554bOf7JJ5/ooosu0nXXXacffvhBzz33nKZNm6b777+/oz9Op0ZoBwAAAABEmTJligoLCzVw4ECNHz9e9913n15//fWoc/x+v/70pz9pzJgxOuSQQ/Tiiy/qs88+01dffSVJuvvuu/W73/1OF198sQYNGqTjjz9e9957r5577rl4fKROyxHvBgAAAAAAEsvMmTP14IMP6scff1R5ebkCgYC8Xq+qq6uVnJwsSXI4HBo9enTkPfvuu68yMzO1bNkyHXbYYVq0aJHmz58fVVkPBoONroPdI7QDAAAAACLWrl2rU045RVdddZXuv/9+ZWVl6dNPP9Vll10mn8/X4rBdWVmpu+++W2eccUajYx6PJ9bN7rII7QAAAACAiAULFigUCunRRx+VzWaNqN61a7wkBQIBffPNNzrssMMkScuXL1dpaamGDx8uSTr44IO1fPlyDRkypOMa3wUR2gEAAACgmyorK9PChQuj9mVnZ8vv9+vpp5/Wqaeeqvnz5+vZZ59t9F6n06lrr71WTz31lBwOh6655hodccQRkRB/xx136JRTTlH//v111llnyWazadGiRVqyZInuu+++jvh4XQIT0QEAAABANzV37lyNGjUq6vHyyy/rscce00MPPaQDDjhAr7zyih588MFG701OTtbNN9+sX/7ylzrqqKOUmpqqf/3rX5HjEydO1DvvvKOPPvpIo0eP1hFHHKHHH39cAwYM6MiP2OkZpmma8W5EvJWXlysjI0NlZWVKT0+Pd3MAAAAAJAiv16s1a9aooKCAcdhotd39/rQ0h1JpBwAAAAAgQRHaAQAAAABIUIR2AAAAAAASFLPHAwAAAADirtYfVKjBlGs2w5DbaY9jixIDoR0AAAAAEFe1/qCWb6totH9Y77RuH9wJ7QAAAACAuApX2HPSPHLaDfmDpooqvFGV9+6K0A4AAAAASAhOuyG3wy4pGO+mJAwmogMAAAAAIEFRaQcAAACAVtpUWqOSKl+H
[... base64-encoded image/png payloads for the notebook's display_data outputs (matplotlib figures) elided from this excerpt of BERT_WATER_MARKING.ipynb ...]
tUrnVqnKL5Opk0uAYZ8X6Vf6bTqmTpw5YItTSfExK+V1qfbmDqgYAAACApoHQ3kQVlVn14c4SrUguP+v+ZYfKNKC5k65u5yp/N5Nt+1ZrS7XUMenoZkI7AAAAANQxQnsTdCjbolc2FyslzyqTpAHNnRTkYZaTSTKbpMRsi9allGtFcrnWpRTqH61ddEVcxa/KNksLjXH6VUrZ7NiLAAAAAIAmgNDexCxOKtW8naUqs0gB7ibd3t1VnYKdzmh3RVy5PtxRqoPZFn2yu1S/p5frmqiK0C5JOrpJslolk+mMYwEAAAAA9kFob0I2pZbp/e2lkqQeYU66raurfN3OHrrbBjrpiUvN+uVIuT7YUaLdJy36sNhTR6yxsphdZM4/Lp3cLwW3rs9LAAAAAIAmhdXjm4i8Eqve2VYiSRoW56x7elcd2E8zm0zqH+2saX3d5OEsHcxzVolclenfuaJB0q91XTYAAAAANGmE9ibigx0lyiqWIr1M+ld7F5lqcFt76wAn3dfHTa5mqyRpUu4NKrK6ENoBAAAAoI4R2puAjall+vVouUyS/q+7q1ydaj4PvW2gk25oWSBJWpcbosmld8iStKZiXjsAAAAAoE4Q2hu5vBKr3j11W/yols5qHXDmonPVFedd8Xg4V7O01NJTr2b1kTIO2qVOAAAAAMCZCO2N3NxTt8VHeZt0VVsXu/Q5qbePJOmlsrH6Zf16u/QJAAAAADgTob0R23a8XKtP3xbfrXa3xZ/N4Dh3XRt1QlaZ9Z817krJKrRLvwAAAACAygjtjdj3+yse7zY0zlmtLuC2+LN5dGi0OpoSlVHmpsmfbFZJmcWu/QMAAAAACO2NVnKORTtOWGSSNLKFs937d4/rozfcZstH+dqcnKUZC3fb/RwAAAAA0NQR2hupHw9WjLJfFOGkEM86+DG7eql5dLRmurwhSXp/dZKW7kqz/3kAAAAAoAkjtDdCWcVWrT5asdL7yJb2H2W3iblElztt1r/DD0iS7v9qm07mFdfd+QAAAACgiSG0N0JLkkpVZpFaB5gv6BFv5xXbT5J0j+U9tQ3z0Ym8Ek37arusPLsdAAAAAOyC0N7IlJRbtTSpTJI0og7mslcS3UcyOck9J1Ezh4fKxcmkxbvS9PmmI3V7XgAAAABoIgjtjcyvR8qVUyIFe5jUO7wOR9klyc1biuohSepYtFl3Xt5GkjT9+106nFFQt+cGAAAAgCaA0N6IWK1WLUysWIBuWJyznMz2eS77OcVcUvGe9Ktuu6ylesUEKK+4THfP36pyC7fJAwAAAMCFILQ3ItuPW3Qk1yp3J2lQ8zq+Nf60U/PalfSrnMwmzby6m7xcnfRbUobe+eVg/dQAAAAAAI0Uob0RWXqoYi77gObO8nSph1F2yTavXVmHpKxkNQ/y1MOjOkiSXly8V7uP5dRPHQAAAADQCBHaG4mCUqu2pFc85m1AfY2yS5K7r9SsV8XXe3+SJF3TO1pD2oeqpNyiOz/bouKy8vqrBwAAAAAaEUJ7I7EptVylFinS26TmPnU7yr4/o0w7jmbbXqnhAyVJuVu/046j2dqZkqMbLo6Vv4eL9qTmauaSvXVaDwAAAAA0VvU4JIu6tCal4tb4+EhnmUx1E9rdnSsWlpu6OFNa/Ktte0tTkJa5SW5HVuvaVxcrV56Vjnvr54Ma3C5MF8UF1kldAAAAANBYEdobgdwSq7Yft0iS4iPr7jFvER4WzXSeraJLH5T8o/+0p7Nyfp4t3/wkvdE3U8kRfXQ0q1CzV+zXkPahWro7XXfN36KF/7lUPu4udVYfAAAAADQ23B7fCGw4Vq5yqxTja1KUT93+SCNMGYoLcFZcsFelV27MUElSu6yfFRfspSh/D0nSrZe1ULMADx3JLNQTC3bVaW0AAAAA0NgQ2huBtX+6Nd5RMqOHSJL8j66UqbzEtt3T1Vkv/rOrTCZp/sYjWrwz1VElAgAAAECDQ2hv4LKKrdp54tSt8VF1d2v8+eQGd1eJe5CcS3Plm7a+0r4+LYJ066UtJEnTvtquE3nFjigRAAAAABocQnsD91tKmaySWvqbFerpwB+n2UmZzQZLkgKPLD1j911D26hduI9O5pfo/i+3y2q11neFAAAAANDgENobuDUpFc9Av7gOF6Crrsxml0uSAg4vlf4Syt2cnTTrmm5ydTJr6e40zd942BElAgAAAECDQmhvwE4WWpSQUXFrfB8DhPbsiEtU7uQht4JjCsjZfcb+9hG+umtoG0nS9O93KflkQX2XCAAAAAANCqG9AVt3apS9XaBZQR6O/1FanN2VHXmpJKlZ+oqztrnl0ha6KDZQ+SXlumv+FpVbuE0eAAAAAKri+KSHWlt7KrQ7cgG6v8o4tYp8s7Szh3Yns0kvXt1VXq5O2ngoU2/9fLA+ywMAAACABoXQ3kBlFVl1IKvi1vje4Y571NtfZUYNktVkVkBugpqZjp+1TXSgpx69sqMkaeaSBO1Mya7PEgEAAACgwSC0N1Bb0itG2eP8zApwNzm4mj+UuQcqN6SXJOkK829Vtvtnr2a6vEOYSsutuuuzrSoqLa+vEgEAAACgwSC0N1C/nwrt3cOM9yM8EXelJGmc04ozVpE/zWQyacaYzgr2dlVCWq5mLtlbnyUCAAAAQINgvMSH8yqzSNuPnwrtocaZz37aidi/qczJQ63MKfJM21Blu2BvN80Y00WS9PYvB7Xu4Mn6KhEAAAAAGgRCewN0KN9JhWWSr6vUwt94P8JyVx8dirhCkhSw53/nbHt5hzBd0ytaVqt09/ytyi4orY8SAQAAAKBBMF7iw3klZFcsPNct1ElmkwPms2cflk7uP+drv3/Fo9/8Di6QCjPP2d3DV3ZQdKCHjmYV6q75W2ThMXAAAAAAIEkyzrLjqLY9ORU/tu5h9XxrvLNbxfvPL5y36UlrmHabo9Veh6Vtn0t9bq2yrbebs94Y31Nj31ijZXvS9ery/frPkNb2qhoAAAAAGixCewN0othJTiapS0g9h3avYOnSu6Wy4vO3PZ6tT/cm6HHzB9KmudJFt0jnuCugU5SfnhzdSfd+sU0vLdurLs38NLBdqP1qBwAAAIAGiNvjG6i2gWZ5ujjg1nivYMkv6vwv90B9XX6JLGZXKX2ndHTTebv+Z69oje/TXFar9J9Pf9ehk/n1cEEAAAAAYFyE9gaqmwFXjf+rHHkrO6p/xYdN71frmEeu7KBu0f7KKSrTbfM2qbCE57cDAAAAaLoI7Q1IUdkfC7TV+3z2WtoeOFSSZNn+pXYlHtGOo9lnvBJP/DGi7ubspDeu66Fgb1ftSc3V3Z+zMB0AAACApos57Q3I1rQSSVKAq0VR3g64Nb4G3J0rgvaE9ZFa4hql1mVH9fE7L+rj8iFnbb/ingGKC/aSJEX4eej18T01/p11+nF7qp4PStB9V7Srt9oBAAAAwCgI7Q3IhpSK0N7Wt0wmRzzqrQYiPCya6TxbRZc+qJzM8dKe53Sf/3J16jdFVvMfv3ZHswo1e8V+5ReXVTr+orhAPTOmi+7+fKveWHlAcUFeurp3dH1fBgAAAA
A4FLfHNxBWq1Ub/xTaG4IIU4biApxV1u06lboFyDc/Sb1zlyou2Mv2ivL3qPL4sT2b6Y5BrSRJD3y9XWv2n6iv0gEAAADAEAjtDcTuY7k6UWiRJLXwaRih/TSLi7dSOlY8p73ZtldlspRW+9g7L2+jK7tGqsxi1f99tEn70/PqqkwAAAAAMBxCewMR6e+uyb28JUkuDfCnltrmepW4B8k9L1khB748Y//+9LyzLlK3MyVHN10cq3bhPsopKtP4t9fpWHahA64AAAAAAOofc9obCH9PV13RykOvbWyYI80WF08d7TRRcRufVLNtr+p4i3/I6uQm91N/gZj62ZZq9ZOWW6xxb63TN7dfogAv1zqsGAAAAAAcj9COepPWZryidr4tt4JjCtv3mVLbTVCEn4dmXt1VRaWW8x6/OzVH89Ye0qGTBbpp7gZ9/O8+8nLjVxgAAABA40XiQb2xOrnpSOdJavHbI4ra8brSW10ti7O7IvyqXozubHzcnbXlcJb+76NNeueGXnJzbhjPrAcAAACAmmqAs6PRkKW3ulpFXlFyLUxX2N6PatXHY1d2lKerk37Zd0J3frZFZeXnH6UHAAAAgIaI0I56ZXVy1dHOkyVJUTvekHPRyRr30TbcR29e31MuTib9uD1Vd83fqnKL1d6lAgAAAIDDEdpR7463HKN8/7ZyKc5U3Ibpterj0tYhmv2vHnI2m/Td1hTd8znBHQAAAEDjQ2hHvbOaXXTg4mdlNZkVnPS9Ag4vrVU/QzuG67V/9ZCT2aSvfz+q/36xTRaCOwAAAIBGhNAOh8gP6qKUDv+WJLVY/5CcSnJq1c8VncL1yrjucjKb9OXmI7r/K4I7AAAAgMbDoaH9559/1pVXXqnIyEiZTCZ98803lfZbrVY98sgjioiIkIeHh4YMGaJ9+/ZVapORkaHx48fL19dX/v7+uvnmm5WX1zCfZd7UHOkyVYU+sXItTFfMpqdr3c/ILhGadU03mU3S/I1HdM/nW1mcDgAAAECj4NDQnp+fr65du2r27Nln3f/cc8/plVde0Zw5c7R+/Xp5eXlp2LBhKioqsrUZP368du7cqSVLlmjBggX6+eefdeutt9bXJeACWJzddSD+GUlS2P758jv2a7WO25+epx1Hsyu9WgR76e6hbWU2SV/9flQ3vP+b9qbl1mX5AAAAAFDnHPqc9uHDh2v48OFn3We1WvXSSy/poYce0t///ndJ0ocffqiwsDB98803GjdunHbv3q1FixZpw4YN6tWrlyTp1Vdf1YgRI/TCCy8oMjKy3q4FtZMbdpGOtZ2giIQP1WLtA9o2aoHKXX3P2tbdpeJvTFM/23LeflfvP6mhs37W6+N7qHmg53nbe7k5Ky7Yq0a1AwAAAEBdc2hoP5fExESlpqZqyJAhtm1+fn7q06eP1q5dq3Hjxmnt2rXy9/e3BXZJGjJkiMxms9avX69//OMfZ+27uLhYxcXFts85ObWbTw37SO5+rwKOLJN7/hG1Wn23Ega8KZnOvAkkws9DM6/uqqLSc9/6vjctVx+tO6Qyi1W3f7y52nWsuGcAwR0AAACAoRg2tKempkqSwsLCKm0PCwuz7UtNTVVoaGil/c7OzgoMDLS1OZsZM2bo8ccft3PFOKvsw+dtYnHx0N7LXlOnn65R4JFlarbtZR3peudZ20b4eZy3v7hgLzUL8NBzPyWopMyi6AAPTYiPlZfb2X/dj2YVavaK/covLjtv3wAAAABQnwwb2uvStGnTdNddd9k+5+TkKDo62oEVNULObhXvP79Qreb5gx7WgU7/Uettzyt626sqcA5QRni/Mxu6eEi+Ueftr2Oknx4c0V7P/bRHhzML9f7qRN0/vL1CfNxqchUAAAAA4FCGDe3h4eGSpLS0NEVERNi2p6WlqVu3brY26enplY4rKytTRkaG7fizcXNzk5sb4a1OeQVLl94tlRWfu11RjvT7h9LyJ3RCkrcpQBHmTLXa9IS2W2JVKPczjxnzVrWCe5swHz12ZUfNWLhHKdlFevS7Hbp/ePtqzXEHAAAAACMwbGiPi4tTeHi4li1bZgvpOTk5Wr9+vSZOnChJio+PV1ZWljZt2qSePXtKkpYvXy6LxaI+ffo4qnSc5hV8/jZ+UZXCfZK1XJ4Jr8kvZ6/aeeZqe8eJKnM+Nc88P13aNl8qLax2Cc0CPDX9bx31zKI9OpJZqMe/36l7hrZV+4izL3YHAAAAAEbi0Ee+5eXlacuWLdqyZYukisXntmzZouTkZJlMJk2dOlVPPvmkvvvuO23fvl0TJkxQZGSkRo8eLUlq3769rrjiCt1yyy367bfftHr1ak2ePFnjxo1j5fiGxCu4Irz7RUn+zbW3+0Mqcg+Ve/EJtTvwrsxegRX7vELP39dZBHm76dErO6ptmI8KSso1Y+Fu/ZaYYeeLAAAAAAD7c2ho37hxo7p3767u3btLku666y51795djzzyiCTpv//9r6ZMmaJbb71VvXv3Vl5enhYtWiR39z9umf7444/Vrl07DR48WCNGjFC/fv301ltvOeR6YB9lrr7a0/1Blbr4yCc7Qe22PCVz+Xlusz8PbzdnPTCivXrFBKi03KqXlu3Vkl1pdqoYAAAAAOqGyWq1Wh1dhKPl5OTIz89P2dnZ8vU17m3TO7b8plGfHtfT3XMU16zqOfuNhVf2PnXY9IicywuVGdxTCbETZF0/R7ryZSmoVa36LLdY9f7qRC3bU7EWwtgeUerRPEAPfrNDC6b0U6coP3teAgAAAACcVXVzqENH2oFzyfdrrT3dH1K52VUBJzap1cG5ki7sb0xOZpNu7hensT0qFrL7cvNRfbPl6IUXCwAAAAB1gNAOQ8sN6KiErtNkMTkrOON3tTKlyGS5sOepm0wmXdUzWv/vkjiZJG1Iyqw4V1GpHSoGAAAAAPshtMPwsoO7a1+Xe2QxmRVizlGbzdNlLiu64H4v7xCmu4e2latzxT+Duz/fqoPH8y64XwAAAACwF0I7GoSM0L5KaH2bLFaTAtPXqd3ym+RUknvB/faMCdBtl7WQJKVkFekfr6/Rmv0nLrhfAAAAALAHwz6nHfirLP9O2mVprnZux+WXtl4dll6n3YPeV5l74AX1G+HnIUmKDfJU0skCXf/eb/q//i00vFPEGW293JwVF+x1QecDAAAAgOoitKNByZWndvV5Xu03Pizvk9vV6aertXvQeyr2aV7rPt1dKm44STpZIKlihfnZKw5o9ooDZ22/4p4BBHcAAAAA9YLQjgYn36+1dgz7TB2WTpBHzkF1XjhWewa+rbyQbrXqL8LPQzOv7qqiUousVqtW7j1ue4Z7mzBvjevdXO4uTjqaVajZK/Yrv/jCFsIDAAAAgOpiTjsapCK/lto+/CvlBXaUS/FJdVxyrQIPLax1fxF+HooL9lKLEG/9v0viNHVwa7k6mbU3LU/v/pooT1cnRfl72PEKAAAAAOD8CO1osEo9w7Rz6KfKiBokc3mx2vw8WRE735asF/Ysd0nq0yJIj1zZQQGeLjqaVagHv96uhNQcO1QNAAAAANVHaEeDZnHxU
sKAN3Ws7QSZZFXs5hlquea/MpUXX3DfLUO89eTozmod6q38knJ9uPZQxTnt8EcBAAAAAKgOQjsaPrOTkno/qsReD8tqMiv04JfquPhauRSkXXDXgV6uenhUBw1pH6bTUf2JBbuUXVh6wX0DAAAAwPkQ2tE4mExKbX9TxSPgXP3kc2KLuvz4d3kf33LBXbs4mXVzvziN7dFMkrQhKVNXvvqrth3JuuC+AQAAAOBcCO1oVLIjL9X24V+rwK+1XAvT1XHxOAUf+MouffeMCZAkhfm6KTmjQGPfWKO5qxNl5XZ5AAAAAHWER76h4ck+fM7dRZK293lerbc+q8C0tWq95h55pazRoXa3SGansx/k4iH5RlXr9C+P6673Vyfqp51peuz7XVp3MEPPXtVFfh4uNbwQAAAAADg3QjsaDme3ivefXzhvU4ukBFkVbQpWM/MJRSZ9Jc/En7TPEqUyVRHcx7xVreDu7easOdf11Nw1SXr6x91atDNV249ma9Y13XRRXGANLggAAAAAzo3QjobDK1i69G6prPorwx+WlJ+xWa0OzpO/JV+dPTO0p/VtKvSM/KNRfrq0bb5UWljtfk0mk266JE49mgdoyv9+V3JGgca9tVYTB7TU1CFt5OLEzBMAAAAAF47QjobFK7jGh2T4RWlHcCe13fK03IuOq/PuF7Wv013KDL3ogsvpGu2vH+7op8e/36UvNh3R7BUH9Ou+E5p1TTe1CPG+4P4BAAAANG0MB6JJKPCJ0/Y+Lyg7oKOcyovUbuvTijo4X7LDInI+7i564Z9d9fr4HvLzcNHWI9ka+cqven91oiwWFqkDAAAAUHuMtKPJKHP10+4ejyt273sKP/yjmh/4RJ55STrQ7CpZ7ND/iM4R6t7cX/d8vlWr95+0jb7fMbi1ovw9znmsl5uz4oK97FAFAAAAgMaE0I4mxWp2VmK7W5XvHau4PW8pOG2NPHIOKUHuqv5M+apF+Hlo3v/ro1eX79Ospfu0MyVHt83bVK1jV9wzgOAOAAAAoBJCO5qk9GZDVejVTG22PSuvwqPqbHbS3hO/Kyeo1XmP3Z+ed942MUEV4btZgIeOZBbavh7dLUqRfxl1P5pVqNkr9iu/uKwWVwIAAACgMSO0o8nKDeig7X1eUNtNT8i7IFkdNkxTkiVPqe1ulEymM9q7u1QsATH1sy3VPsedQ1pr97Fcfbw+WUcyCzV75X4N7xiuf/aKlrtLFY+eAwAAAIBTCO1o0krcQ7Szw51q8dujCjHnKG7jE/I6tk4HO/1HVifXSm0jJM0cEa4it5Bq9e3uYlaEn4ci/T3VIyZA89Ye0tqDJ/XjjlStS8zQhPgYXRTLc90BAAAAVI3QjibP4uqj/dZI5VvcFWNKV+jRJfI88osSLM1UIpdKbSMkacxbkm9Ujc4R4OmqOwa3Vv82IXpvdaLSc4v10tJ96hjpq8Htw+x3MQAAAAAaFUI74BUsXXqPjpUVqyB7j1rvf0/e5fnq7J6mva1uUa5Py4p2+enStvlSaWGtT9U12l/PXdVF321J0ffbUrQzJUe7j+VIkvKKmNMOAAAAoDKe0w5IFcHdL0rZzQdre98Xle8dI9fSXHXY84pCc3dIflGSV6hdTuXm7KR/9orWC1d11UWxgTr9KPd/f7hRb/18QEWl5XY5DwAAAICGj9AO/EWxZ7h29H5GJ0Mvltlappa731Dc7jkyWew7Eh7q6647L2+j/3dJnCQpr7hMT/+4R4NeWKnPNx5W+ek0DwAAAKDJIrQDZ2Fx9tDeLvcqudV4WWVS+JFF6rDnFbnI/rewtwr1liRNHdJakX7uSsku0r1fbNPwl3/W0l1psloJ7wAAAEBTRWgHqmIy6WjcP7Wn24Mqc/aUb94BdTYnyitrb52cbkj7MC2/Z4AeHNFefh4u2puWp39/uFFXv7lWmw5l1Mk5AQAAABgboR04j6yQXtp+0fMqdA+Tm6lMndbdqeCDX9fJudxdnHTLZS30838HauKAlnJzNmtDUqbGvrFWt3y4UbtScurkvAAAAACMidAOVEORV5S2d7hXGVZvmS2lar36bsVsfFKy8zz30/w8XHTfFe206t6BGtc7WmaTtGRXmka88otu/XCjdhzNrpPzAgAAADAWHvkGVFO5s4cSLM0UHdNKzY4sUOTu9+SZ/rv2dX9QZa6+Zx7g4lGj57nvT8876/br+saof5sQ/e+3ZP2y74QW70rT4l1pGtI+VFMGtVbXaP9aXhEAAAAAoyO0A9Xl7CbJpMOHDihfUWplTpH/yd/Vecm1SrA0U4HczzxmzFvnDe7uLhU3vEz9bEu1yhjSPlTL96Rr6e6K14C2IbpjcGv1aB5QwwsCAAAAYHSEdqC6vIKlS++WyoqVIWlHwVG13feW3ItPqJPLESU1/6fSQy6WTCYpP13aNl8qLTxvtxF+Hpp5dVcVlVrO2e5oVqFmr9ivUV0idVXPZpq/8YhWJqRrZcJxrUw4ru7N/TWud7Q6RvpVlOvmrLhgL3tcOQAAAAAHIbQDNeEVbPuywC9K24Pbq9X2WQo4uVktkz6RX+FhHewwUeU17DbCz+O8bc43Iv97cpZ+T86qtG3FPQMI7gAAAEADRmgHLkCZi4/2dH9IkYe+UfT+jxWc9ou8c/Zpb4sblG/nc51rRD4jv0Sr9qZrc3KWyi1/PNf9uy1HNXFAK7k6s+YkAAAA0BAR2oELZTIrJXaMcvw7qvX2F+RemKpOu17QYVOgUiw1HXM/t6pG5OOCvdQzJkCZBSVatCNVP+1MVXGZRbOW7tOnGw7r5n5xuvai5vJy4588AAAA0JAw/AbYSZ5/W23rO0snQ+NltpYrxnxcndbdLfecxHqrIcDTVdde1Fz3XdHu1GcXHcsu0pM/7NbFzyzXi4sTdDy3uN7qAQAAAHBhCO2AHZW7eGtvl/9qX4sJKrOa5ZO1S10WjFJYwjzJaj1/B3bi7uIkSXr3ht6aMaaz4oK9lF1YqleX79clzyzXvZ9v1Z7UnHqrBwAAAEDtENoBezOZdCK4j7ZaWig7qJucygvV4rdH1WHJ+HoddZek5IwCdY7y00vXdNO04e3UNtxHJeUWfb7piK546ReNnr1a769O1IHjZ39GPAAAAADHYoIrUEdK5KJdFz2j8ONr1Pz35+WXtk5dvx+uw13u0LGOt8hqdvmjcc7Raj0eTi4e533uu1T9Z79vOZylLYezJEnX9Y3R6G6RtlH6s+ExcgAAAED9IrQDdclkVmr7G5XZbLBarH9I/sd+UcyWFxR8aIEO9nlKeSHdKwL7V7dWv88xb503uFfn2e+Z+SVae/CkfkvKUEmZRR+tO6SP1h067+l5jBwAAABQfwjtQD0o9onW7sFzFZz4rWI3PiGvzD3qvGis0ltepUOx/1SZJHW5WvIKrbqT/HRp2/zqjcjr/M9+jwv2Uo+YAN1UUqbvtqbo573HlVlQKkkySWoX4au+cYFqGeots8mko1mFmr1iv/KLy6p30QAAAAAuGKEdqC8mk060GK2syEsVs/kZhR74UqEHvlDgoYU6bPJWqmeQ5Hf+W9/tzdPV
WeN6N9fVPaO18VCmFu9K1c6UHO0+VvEK93XX5R3CFBvE6DoAAABQ3wjtQD0rcw/SgYufV1rrfynut0flnbFDceZ8he14RokdJionsLND6jKbTbooLlAXxQXqaGahluxO0897jys1p0jz1h2Si5NJknTgeJ46Rfk5pEYAAACgqWH1eMBB8kK6a/vwr3Wg039UanWSZ2GKOm56WK23vSjXohMOrS0qwEM3Xhyr18f30M394hQd6KnS8opH1v3n0y0a9eov+mBNkrIKShxaJwAAANDYEdoBRzI7Kb35SG2xtFRq6GWyyqzgtF/Ubc1kRSZ+KVO5Y0Oxu4uThrQP07NjOuuWS1tIkpzNJu04mqNHv9upi55apkkfb9aKhHSVW+rvOfQAAABAU8Ht8YABlMlJibHXKC1utOL2vCXf7D2K2T9PYUcWKbn1DToZdolD6zOZTLYV4z/4fxdpb1quPt94RLuO5eiH7cf0w/ZjCvN105gezfTPns3UIsTbofUCAAAAjQWhHTCQAt8W2tl7hoKPrVTz/R/Jvei42mx/QbnJ3yspcpTyHF2gpOO5xeodG6jesYE6eDxPS3anaVXCcaXlFOuNlQf0xsoDahvmo8HtQ3V9fMx5V7EHAAAAUDVCO1CXsg/XvI3JpBORA5URdrEiDn2jqKSv5ZOdoM7ZCTph8lFywTEVB7Wqm3rPwd2lYjbN1M+2nLdtQlquEtJy9frKA+oVE6BRXSI0onOEQn3d67hKAAAAoHEhtAN1wdmt4v3nF2p+zCkWJzcdbXGN0qMuV/T+TxSaskzB5lwF/vxvHWt3k452vl3lrr52LPrcIvw8NPPqrioqtZyzXW5RqX7df0K/7KtYTG/joUxtPJSpxxfs0kWxgRrVNVLDO4Ur2NvtnP0AAAAAkExWq7XJrx6Vk5MjPz8/ZWdny9e3/kJQTe3Y8ptGfXpcT3fPUVyzcEeXg/PJPyGVFVevrbOb5BV8ziaex9YrZvvL8jcVSJJK3QJ1pPNkpbW5VlYnYwXgxBP5euDr7Zp7U28dOJ6vH7alaHNylm2/2ST1jAnQ5R3CNKR9GHPgAQAA0ORUN4cy0g7UlfOE8Joq8Gym3Zbm8u8zXjF758oz54DiNk5X5K63daTLHUpvOVYyG+ufdLC3mwa0DdXN/eJ0JLNAC7enasG2FG09kq0NSZnakJSpp3/co+hAD/WODVTfuCC1DfeRk9lUZZ9ebs62RfEAAACAxs5Y/4UP4DxMynKLVPbFryrkyCI12/+J3AqOqeW6aYrcPltHWl2nExH9JbOT5OIh+UY5umCbZgGeuuWyFrrlshY6mlWoZbvTtGRXmtYeOKnDGYU6nHFUX20+Wq2+3p7QSxF+554fT7gHAABAY0BoBxqKP82Tt0pKl3RCQQozmRVlOimP/CNqvfUZRW95UUetQTpu9ZN1zDuGCu6nRfl7aEJ8rCbEx2r9wZO65q11ahPmreSMgkpz5p3NJrUM8Vb7CF+1i/BRablFLy7eq1s+3Fit86y4ZwDBHQAAAA0aoR1oKLyCpUvvrjRP3iLpmKS08iJFpK1UROoKuZflqaUpVdHWE0pJ+FDHu0xUmZu/o6o+Ly+3iv8ZuvHiOEUHeighNVebDmVq06FMpecW21ai1xapZYiXRnQKV+swH4X6uMlkOvtt9EezCjV7xX7lF5fV45UAAAAA9kdoBxqSKubJWyQdDWypY23GK/ToEkUe/FJupVmK3fO2ovfN08nYK5Xa9jrlB3Wu33pryNlsVsdIP3WM9NP1fWN0JLNQmw5lauOhDB04nm97aUeqwn3d1TMmQL1iAtQmzEfmc8yDBwAAABoqQjvQiFic3JXa/Eql+XRWyG/PKtzPQ165BxV64HOFHvhcuUFddbzlGJ2MGaEy96A6r2d/el6t25hMJkUHeio60FOju0cps6BEm089Pm7H0Wyl5hTph+3H9MP2Y/Jxd1aP5gG6KDZQnZv52fsyAAAAAIchtAONkNXsonSrv9L7vSQfS7bCEj5SUPJC+ZzcKp+TWxW3YbqyIvrpRNzfldlskN2f9+7uYpYkTf1sS42PqUqAp6sGtw/T4PZhKiwp17YjWdp4KFO/H85UblGZVu09rlV7j8vT1Ultw3wkSaXl536mPAAAAGB0hHagMTOZlBvaS7mhvXSo8EEFJ36r4MTv5J2xQwEpqxSQskoWk7NyQ3sqK2qgMqMGqtCvlVTFXPHqivDz0Myru1ZaVO5c3F3MivDzqHb/Hq5O6tMiSH1aBKnMYlFCaq42JGXqt8STyiwo1e+HsyRJ172zXsM6hWtk5wj1ax0sN2en2lwOAAAA4DAmq9VqdXQRjlbdh9o72o4tv2nUp8f1dPccxTULd3Q5MLLso9La16TL7pH8os/Y7Z53WMEpKxR0bJU88w9X2lfsGa7c0N7KCe2tnLCLToX4c4+CG4XFatXetFwtPvUouT/zOhX0+7UKVvfm/nJx+uOaeDwcAAAA6lt1cygj7UBj9KfHw51NkaQjko7IW25qqQBTngLCY+R7YovcClLllvS9gpO+lySVufopL7Cj8oM6KT+ws/IDO6jIu3nFs+ANxmwyqV24r/w8XM4I7fkl5Vq+J13L96Sf9VgeDwcAAAAjIrQDjdFZHg9XleL8dKVum6/UXk/K7Bcl7xNb5Jv2m3zSN8jn+GY5l2TLP3WN/FPX2I6xmF1V5BOjQr8WKvRtoSLfivdC3xYqd6vFQnA5R6XSwuq1dfE477Pn/3p7vsVqVfLJAm1PydbOo9nKKfrjUXCuTmaVlFu0fHeaxveNkbuL8f4YAQAAgKaL0A40VlU8Hq5K2YdlkZTjEqKcZiOlZiNlspTKMzdJXtn75JW9T945++SZmyizpUSe2fvkmb3vjG5KXf1U5BmlYq9IFfu3UpF3MxV7N1OxVzOVeEXK4uxe+YCco9JXt9as1jFvVSu4/1nLEG8NbBdqu4V+/cEMrT81B16Snvhht2Yt3ach7UM1onOELmsTQoAHAACAwxHagabuHLfSWyXln3pVcJLUUm4qlYdK5G4qkYeK5WEqkbtK5GYqk0tJtlxKsuWTtUs6uvSMPkvcg0+F+CgVe0epWG4qVp6K21ypYv9Wsji5VV1rfrq0bX71R+XP4vQt9O3CfXV9fIxWJRzXW78cVJCXq07ml+ibLSn6ZkuKvN2cNfhUgO9PgAcAAICDENqBpq4Gt9KfVnzq9Vfm8iJ5FKXLLeeg3A6tkltcvNzKcuWWf1TueUfkVJYv16ITci06IZ8TW/440EnSgdclSaUufir2CFGRR4QKvZqpwLuZCr2aqcgzUvZeNdNsMin21Dz292/qrdJyi37YlqqFO47pWHaRvt2Som+3pMjL1UmD24dpROdwDWgbSoAHAABAvSG0A6j5rfRVsEjKV0vlezWXknZIzf72x+r1VqucS3PlWpgu98JUuRWmVbxykuR2crfcnM1yLi+US2m2XEqz5Z2zv3LfJicVuoepwJSv/F3vKD+0p/L92qjcpYrF46ox9/3PzCaTesYEqmdMoB4a2V6/H87Sj9uPaeH2Y0rJLtJ3W1P
03dYUuTmb1aWZn3rHBqpnTIDCfN3P2h8r0gMAAMAeCO0A7K+KW+7LTr0KzjighdTnbjm5usut6LjcCtPkUZAij/wjtpdzWYG8ClPkZZZCkuZLSfNltUqFclOu1V158lCO1VNFcpV06jnz1Zj7fjZms0k9YwLUMyZAD45ory1HsvTjtmP6fmuK0nKLtSEpUxuSMs/bDyvSAwAA4EIR2gHYX01vuXd2k7yCVS6pwMVbBT5xqhSJrVa5Fp+QV26iPE/ulFfeIXnlJ8u9JEOeKpanqVhhypYklbj4KdszRtmZx5WTnajiWoT2PzObTerRPEA9mgfo790ideVrq9W3RZBSswuVnFEgy5/u2XdzNqtVqLeCvd20au9x5RWVXtC5AQAAAEI7gLphp1vuJUkmk0rcQ1TiHqLMkItsm12Ks+SdvVfe2Qnyyd4jn+y9ci3NVkj2NoWYJa2coCLvaOWE9VV2eLyywy9WqWfoWU+xPz3vvGUcOF6xJN/fukYqLthLecVl2n4kS78fztLWw1nKKSrTzpQcW/vr3/1Nl7QO1kWxgerRPEDtInzk4mS+sO8FAAAAmhRCO/D/27v74Krqe9/j7/WwH5LsJDsEEgyNRAJIz7EQlIfiFBFLxRkfJvfaORxrBZHxzLRXpzRyqlYFH+5cbIUO12Kx9Vbs2HFAO+r0qIOnRrnaS861pc0RRRkvKiASCE952DvZD2ut+8fe2UlIgCRAspN8XjPbtdZv/dba35X8gvv7+6312zJsJQJhTpTM4URJKpE3nRihpj0UHtpBwcH/TciME2w9QLD1ACV7XwIgkj+Jk+Nmc3LcbFqK/oFgNHUr/cqt9X1+36AvlXiHAjbzKscyr3Isrufx+dEI/3ngJDv3n+Czxggn2xK8/sEhXv/gUOa46V8LM/PiMJeVFXLZhEImjsnFNI3z+FMRERERkZFESbuIjBiuFaB5zHSaA2VwYB+m61BAGwVGhEIjSh7t5LV8Rl7LZ0z4bCtJz6SJPF72lbB/3n8nWnzZWd8j6DN7fAc8pCayqxwXonJciJkXF/HTV3bx+H/9BkdaYuzcd4K/7z9Bc3uS9z8/zvufH88cFwrYfP2ifCaXhJg0NkRlSR6Txob4WlEOtkblRUREREY9Je0iMvKkn6l3kzFOAifTxXaihXDTx4SbdhNu+hhfspViWii2Wrj8/VuIFk7h5IQFnChbQEvJLLwzfWd8HwR9FtdMK+GaaSW4nsfBE2183NDMp4db2dvYyudHI7TGkr1ObGebBmXhHCaEc/haUQ6lBUGKQ37Kx+RyxcVFhHN9GIZG6EVERERGOiXtIjIy9fJMfRI4OnYaR/kv4LnkNX9G+OB2wl/Wkm/EyG36lNymTynb/b9w7NzUc/AXfYvm0rlEw1PB6NvId8ft8/255b5HrK7H/uNR9h/vOdc+pCa9G18YpLQgyEXpZTjXRzjHT1Guj8KO9bzUMugzleSLiIiIDENK2kVkdDJMIoWTiZDDwf17sRc/RmHsEOGvthP+6l38bY2M+bKWMV/WApAIjKG5dA7NJXNoHVtFZMzXTzsSf1FhDr/4pxm0J9wBheZ6Hk3RBI2tMRpbYhxtjXEymuBYJMbR1jgAsaTLvmNR9h3rPak/ld82KQja5Pptcv0WoYBNbsAmz2/heWBbBkGfRa7PIui3CPpMcn02AZ9J0GeR47MI2CZj8vxcOj6fXL+N39bt+yIiIiIXmpJ2EREg2dbEscJLOXbppTD1X8ht+Yxw418oPPaf5J/4EF/sOMX7t1G8fxsArukjWvR1WsfOIBqeSrRwCm3hqSQDYYBen3s/V58fjfDTV3axblGYcNDkaNThWJvLsTaX420urXGXlrhHSyy9jLu0xj2SLsSTbjrhj5+3eCzTIGibBNJJfdBnErDTCb7PJBSwKSkIkOe3yfFb5PotcvypjoKu66l9qc6E3PS6dZrJ+T4/GiESS/YpvryAzSVj887b9YqIiIgMBSXtIjK62enR8nfXdSuOpl9fAQaXkEcbhUaUfCNKKGDjizcROvYBoWMfdDsunlNCe6iceN5FxPLKiOWVkQiOJekvxPEXppa+PFzLj2f68Exf6rZ718F0ExhuHMNNYLjJzm0ngenGKTu8jyvNBl5/O0GABH4SBIzUejEJik+5tAQWMXy0GX5ayaGZXJq9PFrIJUKQKEGSWIDBN8tzMP25xJMusaSbXjrEHZd40iPhuLQnHGLJzrsHHNcjEneIxJ3z/VvBlx75D/osgnZqtN80DPYcbunXef7nP1dxydg8zDM8GqDkXkRERLKZknYRGd3Sk9aRjJ22ige0Aq2RI/DBizDrHgJ+H6GTnxBq+pSc1n3ktuwj0H4Ef1vqRWPfQ/AwMPD6VPdqf9/P2xcOFgnPxGstJhkIk/QVkggWkPQXkvCnl77O7YNuMSdyK4g7RjqhdzuX6fVE0uV4NM7bnxwZcFwJxyPhJGlp79uo+un8aEt9n+r9t4WVVBTnUZDjoyDooyDHpiDoIxSwCQVtfOmZ/C/ESP+FuntAdyWIiIiMDEraRUR6mbSuVx2j8u+tJwbEgGOZncVYhAkSJ9BlBNxvJPGRxMbFxsHGwTS6J+i9JeyuYadG4i0fnuHDtfy4GHiRY7ihUlxfHq7pwzP9qaVh42VGkw3Aw3STGG4C00tguAmsZBt2MoKdiGAloxi4WDhYhgNtDanXWcwk1cmQDIRJBopIBIpIBsaQCBaRDBSRDIRxgjm44Tz+JdRKW9IkaQVJmkGcjqXpw8PEMyw8w8T15eKFSnGxSHqctiMgHjmJ+9Er5BCnHT8x/LTjI+b5aCNAlABRgkS9IBGCtJKDg9WnX+1T7+w94/6gzyTHZ3EimujT+Tr8/LvTuWRsHkHbIseffnzA33kHwYETbSxct71f53xn1dVnTbA/Pxq5IOcVERGRwTdikvannnqKJ554goaGBmbMmMEvf/lL5syZM9RhichIcpZReQeIpF9n5LkYnoPpJTFaD2PsfhVv3l244Qo808YzrN5nqm86kLqN/x/vgsIJ53YtnovltGMlItgtX2LFmrCdKL5kBDvRii/Zmkrwk634Eh3LFmy3HQMPX+wEvtgJzvTk/uSBhIWBZ9qAgWeYYJipJakOCc8XBTuAZ1qAiYcBhoFnWLjpDgzH9ON6Hm7LYdrGTafZV0IzIZoI0Ux++jGBHFq8HA61+/i/h2FGxXg8y0dzHJpjLs1tCZrbE5nJBNsT7oAmFvzJHz444347/ex+0Gfit0ws08AyTWzTwLaM9LaBbRokHI/Pj0Z49N8+orQgiN828Vkmfjt1bNdlY0uqjV77D6WMyw+kz2FmztWxtK1U3d/VfUFjSztl4SB+6zx908CxvRDr4+MMgXworjxrNd09ICIio9GISNq3bt1KTU0NTz/9NHPnzmXDhg0sXryYPXv2UFJSMtThichI0tdR+bPwSCX5BMKADXVP9/1g+9y+Px4Aw8Sxc3HsXOI54/p2TNNBjLpfYl/7CHZuIb7YCez249jpBN5uP44vfhIzGcWKHsNs/AgzpxALF9OJY7kxTDeO6Z
3+GXgDD8M9w2i2ATjR9A/vbNcIHP0/Z68XBLrcZJAa/Q/hFISI2/m0+MbQYoZpcPL5jwYoG1+GEyikxcil1cuh1QsScf20uj6ijkXcNWiOuew70U5ZUR5g0pb0aE+6tCVSyw5JN3WXRX86Bd7Z0/dnL/599+E+1/2nX/9HZt1nGfgtE1/XDoFTOgd8vZSn5g7wMGLN8MlrmbtIOpfd17v5+k0QDHe5Y6ST50FLLMEbu85+N0hX1102nvyAjWGQ6QzJdFxYBpaRWm9uT+K6HqbZ2VFiGqnJFi0jvZ1ezw3YXFQYzHR62JkOkO4dLvap61Znh4nP6uxA0VcxiojI2Rie5/XtQcosNnfuXGbPns3GjRsBcF2X8vJy7r77bu67776zHt/c3ExhYSFNTU0UFBRc6HAH7MP697lhSyP/Y2Yzl3xt/FCHIyLnS+ToGZ+p78YOnLeOg35rOgh1G+GqVVBYfpa66bsC5vVyV4DnAS6G52Kc/BLe34Rx5V0Y+RdheA6G65Dq1vAwPBc8L5XoNX+F8dffwoxbMELjUnU8DwM3NXGfk+oUsNJLs+0YZqItVebGMZ1Y6u4Ctx3LiWE5bXjxKJG4Qx5thGjv8ejCheB5pCYIJEAbfuKejzh2+uUjho8Evsx6HD8J7NRjAG7qjoKkkaofw0fCSx3XcXwCm3bPJurYOKaPJFZqv2eTwCLu2Zn3S6TXk318jEDOP8vwsA0w00vL9LAMUusG2B3bZmo7lfQbqQ6QdL5v0NkJYhjdO0S6bnc9BtJ/ikDccXHdzrbvpR+xIfPf7vVP5XV5x44qpgG2afYo7+2cScfF9TjtzB5d39cwUh0hp5b3dv6k49HxMbfj52Bk1r3MumlAwDIxjM4rMdPrRvpaOjqbEk7nv0md5/R6nBfAcuMELbfzfTvqZY5Nb5smhu3vXs/wMDwyMXW8Yo6L56aP7/ZIFJlfrkeqM8gyDXL8dpd9qXbTsU6X41OrXcu6nbmn0+w4Xf3TlvfzPK4HLqnfvev13PbSdTzPIJJwSDpe7/sB1zPoaPaZOnT+zk0620fHtm0a5PrNTJmV/pGZdFnvuo8ux2e2vcwxhtHzvTrbXHp5SnlH3Y5zdNTp+jPr8bd0mr+V1HaXv1/v1H1nOO4M5zz1/bx0/W7rp26fun7qko4O3CQJx0u9xSn7oPN3aZkmN82ZwuWX/eOp0WWVvuahw36kPR6Ps3PnTu6///5MmWmaLFq0iLq6ul6PicVixGKdH5CbmpqA1A8tm7W2tuLGouz9qpFotG/fzSwiI00MGKJ/q2LN0F4M/765jwcUw/5GCJyhQyLWDO0l8PaLfTxnGRz1QUvXUWkT8KdfXVWkP9Gc4XSxZo78v5204Qc8fCQJkMCHQ8BIzdDvJ5l6GUnyaCdstKbnKXDwpecp8BlOOvV1MfDwMIhjY6fnDeiY08DKdArEsYgT6uNVd9Ofu/TPVjf9Wc31jHREdrrDIJXQJ7CJpTsDktjEPSvTMRDHznQAJNN13G4pS+fnNu+UN+ws730Sxt4/sPetXm9lLuBi4ngWDiZJOpcuJk76lcTC8Uyc1GwPJDFxMTL1nfT+JFa6zEjXS788K32sSZKu9SySp/nI5QL9mylBso9xmvXgYAdyFj3/IkVGLodQ7D0mX3yWQYYh1pF/nm0cfdgn7UePHsVxHEpLS7uVl5aW8sknn/R6zNq1a3nkkUd6lJeXZ/cvtcPqoQ5ARKTP3hgm5xQREZGR5F+Bf/3hD4Y6jD5paWmhsLDwtPuHfdI+EPfffz81NTWZbdd1OX78OMXFxVn9bFlzczPl5eUcOHAgq2/jFzlXausymqi9y2ihti6jhdq69JXnebS0tFBWVnbGesM+aR87diyWZXH4cPfJdg4fPsz48b0/9x0IBAgEuk/kFA6HL1SI511BQYH+AZBRQW1dRhO1dxkt1NZltFBbl7440wh7hzM96Tcs+P1+rrjiCmprazNlrutSW1vLvHnzhjAyERERERERkXMz7EfaAWpqali2bBmzZs1izpw5bNiwgUgkwvLly4c6NBEREREREZEBGxFJ+5IlS2hsbGT16tU0NDRQVVXFtm3bekxON9wFAgHWrFnT49Z+kZFGbV1GE7V3GS3U1mW0UFuX821EfE+7iIiIiIiIyEg07J9pFxERERERERmplLSLiIiIiIiIZCkl7SIiIiIiIiJZSkm7iIiIiIiISJZS0p5lnnrqKSoqKggGg8ydO5f333//jPVfeuklpk2bRjAY5Bvf+AZvvPHGIEUqcm7609Y/+ugjbr75ZioqKjAMgw0bNgxeoCLnQX/a+zPPPMP8+fMpKiqiqKiIRYsWnfX/BSLZoj9t/eWXX2bWrFmEw2Hy8vKoqqri+eefH8RoRQauv5/ZO2zZsgXDMKiurr6wAcqIoqQ9i2zdupWamhrWrFnD3/72N2bMmMHixYs5cuRIr/V37NjBLbfcwooVK/j73/9OdXU11dXVfPjhh4McuUj/9LetR6NRJk2axOOPP8748eMHOVqRc9Pf9r59+3ZuueUW3nnnHerq6igvL+faa6/l4MGDgxy5SP/0t62PGTOGBx54gLq6Oj744AOWL1/O8uXLefPNNwc5cpH+6W9b7/DFF1+watUq5s+fP0iRykihr3zLInPnzmX27Nls3LgRANd1KS8v5+677+a+++7rUX/JkiVEIhFee+21TNk3v/lNqqqqePrppwctbpH+6m9b76qiooKVK1eycuXKQYhU5NydS3sHcByHoqIiNm7cyNKlSy90uCIDdq5tHeDyyy/n+uuv57HHHruQoYqck4G0dcdxuOqqq7jjjjt47733OHnyJK+++uogRi3DmUbas0Q8Hmfnzp0sWrQoU2aaJosWLaKurq7XY+rq6rrVB1i8ePFp64tkg4G0dZHh6ny092g0SiKRYMyYMRcqTJFzdq5t3fM8amtr2bNnD1ddddWFDFXknAy0rT/66KOUlJSwYsWKwQhTRhh7qAOQlKNHj+I4DqWlpd3KS0tL+eSTT3o9pqGhodf6DQ0NFyxOkXM1kLYuMlydj/Z+7733UlZW1qOTViSbDLStNzU1MWHCBGKxGJZl8atf/YrvfOc7FzpckQEbSFv/85//zG9/+1vq6+sHIUIZiZS0i4iIZKnHH3+cLVu2sH37doLB4FCHI3Le5efnU19fT2trK7W1tdTU1DBp0iSuvvrqoQ5N5LxoaWnhtttu45lnnmHs2LFDHY4MU0ras8TYsWOxLIvDhw93Kz98+PBpJ94aP358v+qLZIOBtHWR4epc2vu6det4/PHHeeutt5g+ffqFDFPknA20rZumyeTJkwGoqqri448/Zu3atUraJWv1t63v3buXL774ghtvvDFT5rouALZts2fPHiorKy9s0DLs6Zn2LOH3+7niiiuora3NlLmuS21tLfPmzev1mHnz5nWrD/CnP/3ptPVFssFA2rrIcDXQ9v7zn/+cxx57jG3btjFr1qzBCFXknJyvf9td1yUWi12IEEXOi/629WnTprFr1y7q6+szr5tuuomFCxdSX19PeXn5YIYvw
5RG2rNITU0Ny5YtY9asWcyZM4cNGzYQiURYvnw5AEuXLmXChAmsXbsWgB/96EcsWLCA9evXc/3117Nlyxb++te/8pvf/GYoL0PkrPrb1uPxOLt3786sHzx4kPr6ekKhUGaERiRb9be9/+xnP2P16tW88MILVFRUZOYpCYVChEKhIbsOkbPpb1tfu3Yts2bNorKyklgsxhtvvMHzzz/Ppk2bhvIyRM6qP209GAxy2WWXdTs+HA4D9CgXOR0l7VlkyZIlNDY2snr1ahoaGqiqqmLbtm2ZiS7279+PaXbeHHHllVfywgsv8OCDD/LTn/6UKVOm8Oqrr+ofAMl6/W3rX331FTNnzsxsr1u3jnXr1rFgwQK2b98+2OGL9Et/2/umTZuIx+N897vf7XaeNWvW8PDDDw9m6CL90t+2HolE+OEPf8iXX35JTk4O06ZN4/e//z1LliwZqksQ6ZP+tnWRc6XvaRcRERERERHJUuoCEhEREREREclSStpFREREREREspSSdhEREREREZEspaRdREREREREJEspaRcRERERERHJUkraRURERERERLKUknYRERERERGRLKWkXURERERERCRLKWkXERGR8+rhhx+mqqpqqMMQEREZEZS0i4iIjHC33347hmFgGAZ+v5/Jkyfz6KOPkkwmu9Vbv349RUVFtLe39zhHNBqloKCAJ598crDCFhEREZS0i4iIjArXXXcdhw4d4tNPP+Wee+7h4Ycf5oknnuhW57bbbiMSifDyyy/3OP4Pf/gD8Xic73//+4MVsoiIiKCkXUREZFQIBAKMHz+eiRMn8oMf/IBFixbxxz/+sVudkpISbrzxRp599tkexz/77LNUV1czZswY7r33XqZOnUpubi6TJk3ioYceIpFInPa9r776alauXNmtrLq6mttvvz2zHYvFWLVqFRMmTCAvL4+5c+eyffv2c7lkERGREcEe6gBERERk8OXk5HDs2LEe5StWrOCGG25g3759TJw4EYDPPvuMd999lzfffBOA/Px8nnvuOcrKyti1axd33nkn+fn5/OQnPxlwPHfddRe7d+9my5YtlJWV8corr3Ddddexa9cupkyZMuDzioiIDHcaaRcRERlFPM/jrbfe4s033+Saa67psX/x4sWUlZWxefPmTNlzzz1HeXk53/72twF48MEHufLKK6moqODGG29k1apVvPjiiwOOaf/+/WzevJmXXnqJ+fPnU1lZyapVq/jWt77VLQ4REZHRSCPtIiIio8Brr71GKBQikUjgui7f+973uOGGGwiFQpk6v/71r7n11ltZtmwZzz33HGvWrMHzPH73u9+xfPlyTDPV179161aefPJJ9u7dS2trK8lkkoKCggHHtmvXLhzHYerUqd3KY7EYxcXFAz6viIjISKCkXUREZBRYuHAhmzZtwu/3U1ZWhm3btLW1UV9fn6lTWloKwB133MHatWt5++23cV2XAwcOsHz5cgDq6uq49dZbeeSRR1i8eDGFhYVs2bKF9evXn/a9TdPE87xuZV2fgW9tbcWyLHbu3IllWd3qde1UEBERGY2UtIuIiIwCeXl5TJ48uVtZTk5OjzKAyspKFixYwLPPPovneSxatCjzfPuOHTuYOHEiDzzwQKb+vn37zvje48aN49ChQ5ltx3H48MMPWbhwIQAzZ87EcRyOHDnC/PnzB3yNIiIiI5GSdhEREelhxYoV3HnnnUDqmfYOU6ZMYf/+/WzZsoXZs2fz+uuv88orr5zxXNdccw01NTW8/vrrVFZW8otf/IKTJ09m9k+dOpVbb72VpUuXsn79embOnEljYyO1tbVMnz6d66+//kJcooiIyLCgiehERESkh5tvvplAIEBubi7V1dWZ8ptuuokf//jH3HXXXVRVVbFjxw4eeuihM57rjjvuYNmyZSxdupQFCxYwadKkzCh7h82bN7N06VLuueceLr30Uqqrq/nLX/7CxRdffCEuT0REZNgwvFMfMhMRERERERGRrKCRdhEREREREZEspaRdREREREREJEspaRcRERERERHJUkraRURERERERLKUknYRERERERGRLKWkXURERERERCRLKWkXERERERERyVJK2kVERERERESylJJ2ERERERERkSylpF1EREREREQkSylpFxEREREREclS/x9IAbx0GnJwHgAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAABHIAAAPaCAYAAAD7o3/hAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8g+/7EAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOyddXhT5x/FT5I2qbvT4u7u7u7uMnTO3N1+bINtjAHb2Ia7uwx392IFCnX3tI38/jjNYjfF2tLC+3mePaO5Nzc3ae+b+573fM9Xptfr9RAIBAKBQCAQCAQCgUAgEBR75E/7BAQCgUAgEAgEAoFAIBAIBA+HEHIEAoFAIBAIBAKBQCAQCEoIQsgRCAQCgUAgEAgEAoFAICghCCFHIBAIBAKBQCAQCAQCgaCEIIQcgUAgEAgEAoFAIBAIBIISghByBAKBQCAQCAQCgUAgEAhKCELIEQgEAoFAIBAIBAKBQCAoIQghRyAQCAQCgUAgEAgEAoGghCCEnEdEr9cjNTUVer3+aZ+KQPBcI65FgaB4IK5FgaB4IK5FgUAgeH4QQs4jkpaWBnd3d6SlpT3tUxEInmvEtSgQFA/EtSgQFA/EtSgQCATPD0LIEQgEAoFAIBAIBAKBQCAoIQghRyAQCAQCgUAgEAgEAoGghCCEHIFAIBAIBAKBQCAQCASCEoIQcgQCgUAgEAgEAoFAIBAISgh2T/sEBAKBQCCATgtkxAI6HeDoCSidHu84aTE8lsoFcHAr2HMUFB7psYBWAygd+fsXCB5EZiKgUQMKJeDs87TP5tlFqwFS7wN6HeDgATh5Pe0zEggEAgGEkCMQCASCp01qJHBuKXDyDyA3E6jUBWjzDuBVDpArHu4Y6bFA6Gbg8M9AZjxQuinQ/mPApzJg71C45y94fDLigbB9wIH/ASkRQGAdoMMngH8NinECgSXqVCD6IrD7UyD2CuBRGmj7HlC2BeDk/bTP7tkiKRy4uBI4/Rc/93Kt+Fl7VxLjqkAgEDxlZHq9Xv+0T6IkkZqaCnd3d6SkpMDNTaz2CgRPC3EtPiOkRgHLhgJR58wfV7oAk/YBPpUefIyMBGDLdODKevPH5Qpg3DYgpEkBnaxAise+FtWpwP5vgaO/Wm8bugyo0g2QyQruRAUlH50WuLweWDPeelubd4DmrzzXAmCBfi8mhwNrJwPhR8wft1MB43cCQXWf7PgCgUAgeCJERo5AIBAInh7RF6xFHADISQf2fQfkZDz4GGmR1iIOwEnfljfo+hAUPzLigGNzpLdtfQNIiyra8xEUf9KigG1vSm87+D3/pgQFQ8JNaxEHADTZwO5PxLgqEAgETxkh5AgEAoHg4dFpC+5Yej1wYaXxZ5kMKNsSqDWIpVHXtwHqlAcf584h29uiLz7cMQRFT+xV/g1IkRoJqJOL9HQEJYCsJGbjSKHTAkl3rMeoghyznidCt/L/dg5A5S5ArYGAb1U+dns/kJ369M5NIBAIBCIjR1AyiUjOwlurzkMmA74bUBvBno8ZjCoQCB5MrhpIuQecXw7EhVJsqdIdcA8B5E+wHiCTGQOJy7cFmr/MvJTE20CZFiyVkD1ERo4yn1IKmfzhc3YERYu9Y/7b5eIWRWDBg/4m9Dpg9QRmuZRuClxayzGrTAugao8nH7OeJ5QuQMPxQKVOwLXtQGYCUG8k4FkW2PUxAFH2KBAIBE8TcZckKHHodHpM+PskEjJyoNfrMWnhaWx+uSXkcnFTIRAUOJpcrr4uH2Zc2Q7dDOz9Ghi3FQio9WTHrzcKiDwL1B8NLBsGaHOMr3HsN2DMJsDVP/9jlGlOwUavs95WpZsIQC2ueFeimJObZb0tqB7gKLrjCCxw8mKAefx1620O7nSJpEfz72peK/Mxa983BTNmPS/UGQJc3chx2UDoZsA9GBj4F+Di9/TOTSAQCASitEpQ8th0IRKh0Wl4uV1FvNiuIq5EpWLf9dinfVoCwbNJehSwerx1eUJ2KrBu8pNnUniWATp/AWx90yjiGMjNBNZMANKi8z+Gqz/Qc6bE44FA5y8BleuTnaOgcHANAAb8SRHOFAcPoM8c0VJaYI2LP/9mLK9puR3Q7TtmLjWdCmx7W3rMWjuJHe4ED8fer60fS7kPHJtLkV8gEAgETw3hyBGUOBYfu4tapdxRyd8Ver0eFXydsfDoXbSv+oBVe4FA8Ogk3WXwsBQxl5lX4ez7+Md38gbsHG3nXiTdYaima4DtYyhdgJoDgODGwNnFLAOr0g0o2wrwCHn8cxMULnZKoEJ74MXjLNuLvw6UawNU6syW0gKBFP41gSmHgGtbgagLdHYF1gKOzwOiL1EQzk6zfp5cQeE5K0m4SR6GGzuN/3YP4VgacRbQZAFXNwDt3wecPJ7a6QkEAsHzjhByBCWKe4mZOHknCdPaVgAAyGQyNK/gg+Unw5GRrYGzSvxJCwRm6HTs6pQcTrHEpxLg7Ac4eT7c8zXqBxzfYlU2MxHIjAdiQzlx8qnCSZODjVa46hR2Qcn3HLI4MTOswudmcVU9/gYAHeBdma/hXx3o+nXeSrzM+tz+e81UICMWiLsOKJ2Z+eDiD9g75H8egoLH3pGlMh0+5u9N5Bk9f2QmAGkxQGIYRWGPEMAtyHwfvZ4CjZ2KGTfOfszp8qrIkkq9jn9LjScB9haZeQol0OoNtstOvsfSK7kd4BII5KQAKRF0/XmU5jjg7GP+d6jJ4XMSb9Ml6FOZ52lrTHtWyE4HGr4ANJnEbmHpcUCXyty2ZCC/WwQCgUDw1BCzXkGJYteVGNgrZGhU1pidUL+0JxYdu4vDN+PRuUY+q/YCwfOGTgtEngGWDOIqtIEa/YCu3+bvcjHgVcF2/oyLH+BoIgilRgHnlgD7vjaWNSiUfK1qvQEXC+dOehyw/1ugYgfuZ1laBQAqN060rm0Hmk3jBOzSWmDbW4A2T6iR2wFdvgJqDwMU9nTknFkExF9jyGn1vpwcyhV8zYMzgBPzjR2TlM5Ar1+4j0eZB2fyCAoHIeI8f6RGAuunAWF7jY+5BgAj1gD+NSgkJ4fTaRcXygDjGgOAGzuAHe8DOg2fo7AHOn0JKJ04HhjGLLkC6P87cHYhM3IMOPsCw5YB+74Fbu42Ph5UH+j2LSCzA7zKURS6fQBYPRbIyeA+MhnQZBrQ6vUncyMWd2oPBbISgb+7m7car9gJGL9DhJELBALBU0Zk5AhKFPuvx6FqgBsc7I03/AHuDghwc8Dhm/H5PFMgeA5JiQAW9jUXcQDg8jrg1AKjEJIfLr5Ai9elt3X/nqvaBmKvAP9+YZ5Noc0BtkwHku9aP//6NuDkH2xB3vwV6ddo8zZw5h/g4PfA/dNAwk1g82vm567TANveAeKuAncPAXOaAkd/YWnA7k+AuS2AmEvc9+YulmCYtr3OyQDWTaI7aN2UB2fyCASCJyc3i0KKqYgD8Ppb1Jf/v30A+K0ZcORnXs9Hf
gGizudl4GiMz9HmAtvfobB8YzedOQBQrQ874d3cY/4aGXEUuOuPNn888gyw/zvgynrg4EyKSMuHGkUcgGPHsV+BW/sK5nMotujpvMmwuLe6uYtB9DkZdDcKBAKB4KkghBxBiSFbo8XxsATUKuVuta2SvwtO302SeJZA8BwTccp2vs3xuUB6zIOPoXIFmr0IDFoI+FXnz6WbcUW2QntjK9/0OODwT7aPc2yu+WQoLRo48D3/fXkdj9N7NhBYh69RqgEw6G8Ga976N+/9nAYO/2z7NQ7+CNz819o9lJPOkNPkcODADOnn6jTAnYMs47p7JN+PRCAQFADpMcD5ZdLbDFk2ayaYC8M1+gPnFts+5vllvIZdA4EePwC1BwMXlkvvm5WUl5dj4cC79S9QphkQdRa4tMo6NNnAge+e7eDkmMuAOll627nFHLOfNOxeIBAIBI+N8EUKSgwX76dArdGhRpB1XXplf1f8ffgOMnM0cFKKP2uBAAAzJ2yhTpEuZZLC2Ruo0Qco25zPsXcyL6kCmB2Rcs/2MVLuUuzRx7D8ATIg9b5x+/7/AV7lgTrDOAmzU3FCdWYhSy0aTmAJ1o0d+bxGOFCqnvS2uFCuHqfcl94OcJuzL0WuCu0BRw/b+woEgidDk217DLJTMZfFMrTY2QcIP2r7mCn3mcu1+xPmgfWbJ93e3kBqBAPXTUVtvR5w9GIGj8oVGLqEYk5WEnDyTyD6gvG1HsbVWFJJum17W24mf3+yh/wOEQgEAkGBIxw5ghLDqbtJcLCXo4y3s9W2Sn4u0Or1uHA/5SmcmUBQTAmqb3ubezC7RT0Kzr6AWylrEQcAHNyBgNq2nxtYjyvj6yaxBCv6ItD2ffN9EsOAvV8BG1+iY+f+SR6z18/A9e10/OT3GkH18xev9DogoKbt7QG1+HxNtnQmkEAgKDjsndhqXgq5nfQ1mHjr4a5hgGHo6bH5t7H3qcycHlMUSoa2Z6dSzF0xClg5Gjj4A4N/6w7Pe63aDFh+VgmsY3ubix/fu+oZD3wWCASCYowQcgQlhtN3k1DR1wUKucxqW7CnE+wVMlyJFPXaAsF/+FW13ca5/ceAW6D0tsfB0QNo+bp0YK2dA7MoDv5AcWbVWAB6wLcqV80tcfIGvCvQRdPxE5ZXRJwGru8AGr3AiZYlcjug6TQgdIv0+bkGcELX8XMb5+8J+FZhlk7twbYnmAKBoGBwDWQ3KSk8y7ObnGWg7tVNQO0hDDe2RKFk6dU1kzHg6K9A67ekX8OnEsOULcuH6o5ge+193wB3DgE1B/Dx5HBgw0tAxY4Uwjt+Cjh5WR712cGjNMdhKVq8Djh4WpelCQQCgaDIEEKOoESg1+txJjwJFf1cJbcr5DKEeDrhapQQcgSC/3ALAkZvBMq2Mj7m4MGQ4kqdCv71vMoDw1ey85MBn0rAyDV54cp5Nnx1CssjrqwH+s4z7/wSVB8YvYFdasq1AW7tNZZXVGgP3N4PDPjDfILhWY6daVLuA81elj63Hj9y4hhYh9k7Ln4mr1mPz9/1CeAeAlTvY8z+EQgEhYPCju6WDh8DShc+JpMBlbuyo5RbkLVrT5MNHJ3DccarvPFx7woc6yAzF2FzM4BKXSjgmrpHyrdj7lf4caP4bOdAoTi4IXBhBR87/RfHA1OOzOZz/WsVxKdQfFEogWErgPJtjY+p3ID2HwGVuwGZcfx9CQQCgeCpIMJEBCWCmNRsJKTnoLyvdVmVgdJeTrgihByBwByvcsCQRUBmApCrBhzd2WlKUUDDf04GhRmdjhOhcu2AMRuZJ6HTUrzZ9y0FGFMizgCeZeiSmbSfq+IKe8DRm5k8vX8GstOBDS8anxPcCLi0hsJO48mc6AHMt9j/LQWd7jOAyl3o3jm3GPCqCHT4EPCtxkmHNgcIaQpMOsDMntx0IPIcsOcLoGpPoOF4rrYLBILCx9mH4mutQSxlUuU1M7BTsfSq4TiWUu37lp3v/GsAbd/l/8dtM3bkc/QCXP05DpmOJ07e/K/ZNKB6LyArGdDlctw6/RdFnxf2sLzK2Q/Q5QCaHKDzV8CpP4GEWyzxcvahE8g9mPu6BQEO0gtLzwwp9znW9pnDLCBtTl4pmRxYNQpo8y5FeDvV0z5TgUAgeC4p8ULOr7/+ihkzZiA6Ohp16tTBL7/8gsaNG9vcf9asWfjtt98QHh4OHx8fDBw4EN988w0cHByK8KwFj8rFCGbflPOxLeSU8XbC4VvxyNXqYK8Qq+kCwX84ekrn2jwJmmxmUeyfAdw7Sot9/TGAVwVa8h19eLMfeVb6+a4BQG42SyfcS/E/U5x9OalzD+HKe8+ZfMy/JnBiLlsNG1qIy+RAt++4grxiJDuplGnJVXvXIMDJE0iLoQPo6K8Unip1Bpq/zHIqr/JAjX6crEmVbAgEgsLDTskxI+kOcHwey5oUSrpjqvelMFuqIaDNzgta9+DzlM4cR0yRy6XHE4U9XT/3jnMMyExg9716IzmO2DsBh2dyvHILAhq+APSdy/FJbg/0mAmc/AO4tBbwrsisHnsnCuPPKkpnimDx14FDM/l/70pA6zeBLl/TySREHIFAIHhqlGghZ8WKFZg+fTrmzp2LJk2aYNasWejSpQuuXbsGPz8/q/2XLl2Kd999FwsWLEDz5s1x/fp1jB07FjKZDD/++ONTeAeCh+VSRArcHOzg7SyRjZFHsKcTcrV63EvMRHlflyI8O4HgOST6AvBXN2PXltRIToLqDqeYE1iXwo4tIad6byD6cv4ZC3ZKoOVrQNx1YPu7QOxVClL1RgJ1RwLrJnOVuOV0TtAurjY+9+JKCjfjd3JFfdPLdOkYOP0XcHEVMPFfijkCgeDpkXALWNDFvJ319ndZ4jR02ZPneWUkADve5zVv4NIavm77j4Clg4zhyqmRwP1TQNOp7IDl6MmxKOI0kJPOblp/HwR6/QLUGcptzyJO3gyZ3zLd+FhaFHDnAIX1il0BhRByBAKB4GlRom0LP/74IyZOnIhx48ahevXqmDt3LpycnLBgwQLJ/Y8cOYIWLVpg+PDhKFu2LDp37oxhw4bhxIkTRXzmgkflcmQKyno7Q5ZPPXaQB7tH3IxNL6rTEgieT9LjgM2vS7fePbcUCKwNRJziim6Dsebb5Qqg42dAQhhQf9SDS7wSbgKrRlPEAVhKceQX4OwillgolMy0MBVxDGhzgK3TgYwYcxHHQE46sOcz6xbHAoGg6MhIAo7ONhdxDESeBSLPPPlrpEaYizgGGowBtr4h3SHr+Dw6e+wdgAPfA52/NN++413ztuXPGuoUYPen0tt2fghoM4H06CI9JYFAIBAYKbFCTk5ODk6fPo2OHTv+95hcLkfHjh1x9OhRyec0b94cp0+f/k+4CQsLw9atW9G9e/ciOWfB43MtOg0hXk757uPpZA8npQK34jKK6KwEgueU7BS2D7dF1AUg5jK7Ttk5AGM2AX1+BfrNAybuBap0B+oMsy6LsCQ1Ctj+nvS2W/+yNKLvPJZ4ObgDFTsAFToYg1MBZvFkJtl+jWvbOGERCARPh8w44NpWho5X7mLt
kDu7iLk1T8LNPdKPO/mwpEsKvY7bEm5R7FW5mJeo5mQ820JOWjRzixT2DMyv1NnooMzJoKCf39gqEAgEgkKlxJZWxcfHQ6vVwt/f3Jbv7++P0NBQyecMHz4c8fHxaNmyJfR6PTQaDaZMmYL3339fcn8AyM7ORnZ29n8/p6aKMN2iJiNbg3tJWehRO39rtUwmQ5CHI27FCUfOs4i4FiXISGAA6MXVgF4L1OzPvBfTLlCFgewBawAKJZ03Og1wfC5wYj4w5TAnQfdOAHf2s/V4xU7MwJFy5WhymGORcs/268RcAZz8gIBaQM9ZDFSWybnKHneN7YOhB2QSLdH/O1d7AKLzyqMgrkVBgaLNAfr+xvLI5HCKBl7lgN2fMZfFzoHB6Qm3gNDNFG7LtQGCGwMeDwgmz80C0uOlHTfAg7suKVSAHhxf758E/KoBd48Yt1u2Ry9iCvValCuAhhOAyp2BsH0Uszp8DEAPbH+f2+0dC+71BAKBQPBIlFgh53HYt28fvv76a8yZMwdNmjTBzZs38eqrr+KLL77ARx99JPmcb775Bp999lkRn6nAlOsxLHsI8czfkQMAQe4OuBEryiSeRcS1aEFGHCc6ZxcZHzs+F6jaC+j5Q/7ZM0+KoydQtiVw55D1Npkc8KvKm/wzC/mYWylO1uY0ZTcZA3YObDUe3Ni83bdWw0lTVhKPZ3MSBtr7j6/gBM/AqQVAzQFA5y/YvtzZ2/Z7qTUUcPJ62HcugLgWBQWIVgNkxgPLhjJA3YCTF9D/d2Djy0D9sRR5lvSnoAMAp/8GXAOBsVvYelyKXDVwYxewejwwdIn0PolhDFCPuWS9TaEEXPw4liXcAmoOpBPF9BwLWzR/AIV6LbqV4ntcOsT42NnFQEBtYMCf/B4wbekuEAgEgiKlxJZW+fj4QKFQICbG3NYaExODgABpu/5HH32EUaNG4YUXXkCtWrXQr18/fP311/jmm2+g00lPFN577z2kpKT899+9e/msDgsKhesxaZDLGGb8IALcHXE3PrMIzkpQ1Ihr0YLoi+YijoHQTcCdw4X72o6eQI8fpTthtX0PuHeKQZlxee7IVm8wmNhUxAEAjRpYMYIBmqakR/PxOweAih0hiZ2K3WOyU81FHAOX1rAlcfcZgJMv0OxF633cg4HWb4hV5UdEXIuCAiM9Glg11lzEAYDMROazdPoC8CwDLBtiFHEMpEUBm6ezpbitY68Zz3bjN3YCzV+x3ufiaqDXT+blmAY6fQZkpXAfmYxZXNEXuE1uB/T/88HloYVMoV6LmizgwAzrx6MvAHcPAfYu7PQnEAgEgqdCiXXkKJVKNGjQAHv27EHfvn0BADqdDnv27MFLL70k+ZzMzEzI5ebalUJBy73e0MbWApVKBZVKpPI/TW7GpsPP1QFKuwfrjgFuKiRn5SIlMxfuTqKN8LOEuBZNyMkAjs6R3manYnvdCu0KvuW4KT6VgUn7gcvrgLC9dADVHswyptRIYOvbbNvr4M7si/R4QOkGaNV02WjUPE5GPFuDm7YLTrnP93h+OTBwAcsrDDkWMhlg5wT0+IGByRdW2D7Hi6uB6n0YutzqTaBab7qWspKBGn0pErk/oDRDYIW4FgUFRsp9Ou+kiL7IEsy468bxwpLb+yj6GFqSmxJxluWdAFuHt3oDGLwQuLKBZZsVOwLBTYAcNTByDcexu0c5JtQZSkfOpdXcv/dsIPk+UKEj4FMJaDgOcC9Nt85TpFCvxfP5jK2n/wHqjQJg3SFWIBAIBEVDiRVyAGD69OkYM2YMGjZsiMaNG2PWrFnIyMjAuHHjAACjR49GqVKl8M033wAAevXqhR9//BH16tX7r7Tqo48+Qq9evf4TdATFj5ux6QjycHioff3duN/dxAzUdvIoxLMSCJ4iWg2dKAbkdkDTaUC5VpwUOXgC6tTCFXJkMq6Ut3gVaDwRgIylDFmJ7PIyYQeQnQ7kZrCE6oWdgDabna4yEzhJ0uu4Eq5QMoNB6Qw4+9NJ0+dXOmWykoGu3wE5aYCDB/dRKAGVK1fxTy/Iy+PRWp9jdiqQdJehne6lAK+KQPcfGdaclcztGXEUhjJi2dnG3glw9QdcAs3LvQQCwcOTk8lrKzWCOVSugYBLgHUeVs6DmhPorZ18lmizjYKQQsX9czIouEw+RBFIkwWo09jGvN3HgEIB5GbS1SO3Z+hxvdEstdRrAcg4ltUeClTrwxIqbTbgV53lRtlp7MznFsRtKtfH/6yKK2oTgU2u4PeMwTmVkwZAllf2Ku6fBQKB4GlQooWcIUOGIC4uDh9//DGio6NRt25dbN++/b8A5PDwcDMHzocffgiZTIYPP/wQERER8PX1Ra9evfDVV189rbcgeAhuxqajTojHQ+1rEHLuJGSidvDDPUcgKHGoXOk0uXec7pZ+84CrG4ElPxv38SwHDF9h3QGmoJHJKK6kRgHb3qZ7ptPnwJoXOLky7FN3JFClG0umDA7IXr8ASbeBIz8bV86dfYDuPwCn/2KoqIs/g4w9ygIbpgGl6gOlGrArVq1BLOXSaTnJOjIbiDpnPLfy7dha+O6RvBDksXxs5SjjPv3mc+X/+ByjGOTiBwxZSifRg9qjCwQCkhHHTkbpMRRE7x0HDn7PrnAOHsDAv4AyzSn0GvAsx/FByhXt6Mn/AuvYfk33YF63G16iqLxuEpAey21yBfN1AutQdN79CQWbvnOAKxuBvV8wDBmgoNzlK+D2AZZlymRAjf5A1Z50Fa6dCEAPtPsAWD3WWA4qkwMNxwNt3gVcnm5eToFTrQ/HxmYvUYzXZPH3eGUD3//ltexA6F/jwaHRAoFAIChwZHpbNUUCSVJTU+Hu7o6UlBS4uYmQt8JGnatFtY+2Y2Kr8mhX9eEsvJMXncLEVuXxcodKhXx2gqfJc38tJocDf3YCQpoyj+bUn9b7uAYAL/xrXrZUGORmATs+4DkMXgRsekW6XKLFawwVvbmbN//1RgHb37Xez84BGLIIWDKIPyudedyVoxiAGn+D+RcnfjcKQE5ezO05+Sdw5yAFoN4/mwd1AkCbd4C7hxnUHFSfgtjuT6zPwd4JmHYU8Cz7JJ/Mc8Fzfy0KgOR7wKoxQMRp42PBDSlwrB5P95vcDph2jE4ZA+o0YO+XLHm0pPdsoM5wIC2S48vVDdb79P+dom75NsDyEXTZWNLpc+DWv0CDcXTUJN5iuLIlMhkwbDkzewwCT7OX6dA5NoeByeummrshDXT5Gmgy9am7+Ar0Wky6SyFn0yt0UQL8HTZ6Aag7HFjYh49NPgh4hDzZawkEAoHgkRG+cUGx5nZ8BvQAgjwePozUz80BdxNF4LHgGcejNDB+B2+qzy+T3ictGki4Wfjnkh7L4GW3UuxAYyvz4vTfdNEAQN0RwMnfpffTqNm1qlQD/pyTwQlFo4nMsLB3BI79ZhRxAOZkrJsMtHwNqDOM7Yy3vm197FN/Mv8CAOqNtH0OuZks9xIIBPmTkQCsmWAu4gDA/VPAoZlA06n8WacBzi01d984uAKt3wR6/wK454kBftWA4SuBar1YApVwC6jSBWj
7LsVpmQwIrMvOSeHH2K487pq0iAMAJ+YzH0ujphBh65rX6+nGqdLd+NjpvzgOeZWnWCUl4gDAoR853j5LqJMZFm0QcQD+Do/P5fjsFsSxPurCUztFgUAgeJ4RnnFBseZOPOvnA90fLiMHAHxdVLgnhBxBSUarpeNEo6Y7xTVAOlTTsywzC2xNYAAg8TZXqwuT3Ey2F3fx5yquLdTJxg5RroHGAGMpku6Yd4RJusO2t3Yq4PBM6edosoHoS0Drt1gioXJhKUfdESyd0OYwvNTQMtfZx1j+pXID6g4DyrQEoKcAlhLxUG9fIHiuyYxjGZUUdw8DLUy6RcVc4nVoZxLQ6+wL1B8NVOpMoUChMi9TSgwDrm8DmkwCKncFIKOwu+czOhNrDaRLzxYp93mtx12jAJR0h7k91XqzdEomp1hx+m+Wh9UbDVTvy+fGXwOUriwBS420/RoZ8YAuJ9+PqcRxY5d1NzEDx+awpGzHB/yM0KNIT00gEAgEQsgRFHNuJ2TAWamAq8PD/6n6uqpw8k5iIZ6VQFCIpMcCZ5cAR37iaqeTF9DyDaD2EOkMBntnoPnLXDXW64Hb+4GLq4whor6VC/+clc55OTn3AZ+Ktvdz9mVIKAAk32VHmpjL0vv6VmVHrP9+rsIJQ5Vu+QtA0RcZQhp9idlB909ywpceS7Gm8USWV1TsyCDWgNqcQNbsz2yPq5vZCcu7ItD5SwY4a7K5Ei+TsYxNtCsv/uh0/H3qcikMuPo/7TN6dlGn5b/dVAwIaWIu4phiq5V32RZAUhiwegLzdpx9gSaT6fSJv8kcnVL1pctLAcC7Aq91v2p87YA6bGt+bRuwfioFc48yQKvpQKmGbHt+azfH06D6QLl2QLv3KAQZkMmAcm0pIildKADJFRx3lc75fx4lhfjrtrcl3QHKtgYG/JEnckXxWrN3Bpy9i+wUBQKB4HlGlFYJijV34jMQ4O4A2SME6fm5qhCTqkauVleIZyYQFALZ6cCBGcCeT43lSZmJwM4PgCO/WHd40enohom/yRyKtRO5/5DFLAXwKs+b7MLGxZ/5EOmxgJ0jLfdSNJnM0goAOLOQz5FC5UaxJfoif3byYq7G6b/YZti3qu1z8avKVfqAmlzF3/mBMfw0OxU4+AOw9ys6AHxrMEcn/jowrxWwZCCdUEMWsdvWor6cvK2fBsyqCfxSn+VayeGP8ykJior0OODkfGB+a2BWLeCvrgy3zbRR8id4MqRafxuQyYzCjb0jUHPAox1bnQocmsXxT53CxzLigFN/URTKyQDmteY52OrS1/RF4PoOirOabAo2h2ayHNTQ1jz5LrDpVTqLMhOM5V+RZ4D0KGDlaIbMu/ixPKvffCCoLrDrI2YDnfqT4vG55SwFexYIaWx7m281Zp1tfIXCd/x1/h6WDGS52wO7kQkEAoHgSRFCjqBYczs+479OVA+Lr6sKOj0QmZxVSGclEBQSGbG2V5WPz+EE1ZTku8AfHShY6HUsS7i6EVg7Cej1MzByLdvtFjZ2KjpdGk5gcGnv2SxhMGDvCLR+G/CpAtw7xscSw9iKvNsMY6kTwMnWgD8otgBcRR+xhuHDwY0YZtxWIiAZ4HECagNR54Fag4Hj86T3u7yO52RnDyzqx89Mp+V/oVsoinX9liGn+74G3PKcApps4OxC4O8expIsQfFCnQLs/RrY9o5RwEsMY1D2lfWAVpPv0wWPgbMvULmb9LbK3YA7h1kGOmaTMQfnYcmIAy5IZIB1/x+wYhRwYTlLtfZ8DvSfby7yKl2MXe06fkoRd+0kBhnfPyn9egd/YHc7A37V6D5JjaRTp+9vQNdvWKJ5aCaFcwCIvcoAZTt7YPP0/EtMSwplW7FjlxRNp1IIy81kBtG2t/m5RJ6hcBp5rkhPVSAQCJ5HRGmVoFhzOz4DrSs/WktPP1cKP/eTslDG+xmxOAueDzLijS2wLdHmAlmJAMryZ00OcPIPY/imQsm8B9/KQFYyEBcKlGlRuOebncbz0OUC0LM0ovNXPNfeeS3FtRo6auT23G/yQSAnk8KTgxudOb1/4cq9QsksC5U7J17aXHaYyYij26jDx8y8kdsBPWdyYmVYpfeuyPbB/36Rd3J66ewgpQtQox8nn/E3eB6WZCWxy0359lyhbzoNiL3CNuYAHTl3DgN1hlg/V/B0yYgDzvwlvW33p0ClTmxZLSg4HD2Anj8C21XsLKXXM3emej+g/fscI5q9aLt0Kj9S7lu3Jg+sA8RdZ7mUgbhrwObXea0G1KLwq3TmmKJ04lhStQev6fBjtl8vNcLcYeRXHbh3gv9ODAOOzmGHqox4jh0395iPIfu+Adq+z/LWlq9LZ5uVFJLD2RVs5wccK30qc+wMaUxxK+m2cd+4UI71bqX4GW57Gxi9nuOsQCAQCAoFIeQIii2ZORrEp+fA3/XRHDk+LkrIABF4LCh5PCh7xc7kWlAnAzd28N+lm9Lxcmk1cHk9J0whjelIcHuMydODyEzijbwmi6VS90+yvKrRBGbeKJQMQD2/nCvWHT+hm+X4XIok5dtTBLm8gUHE6hQgOyXveclczb62hblA906yRKz3bE7MdnzIiULdEcCE3cZsiqhzeWVPeSvhCnvr8645gF2zzi0F1k0BvCoAfefQhWMo+TJw5xBzdFLC+V5LNweav8Lyi/QY4Mo6TmrslA/+vLS5FJ8eoURU8Jgk3rae+BtQJ/PvTwg5BY9bEK/RDh/zM9ZpgDsH6U5pOlU63+thcJBooR1Yl9enJSn3gR3vc/xr/xEQfhxoOoVdleTyvFKqXGPXOinkdoCjl/Hn7FS6iOR2wOCFdPNseoUidrlWwIgpwPZ3jWHLqZE851ML6FB0slHuVRKQ23G8a/YSUKYZS11P/QmEbgYqtGdnsS1vGEtNw4+wpDU1gqHW2elCyBEIBIJCRAg5gmLLvUSWRj1qaZWdQg5PZ6UorRKUPJx9KVokhllv86tGt4oBhT1t7+7BFBiWDzfmPcSFsnV267cZhCw1GXpcstOBi6sBj2Bg9ThObAyveXs/X8+3KrDhRT7e5h3gwkq29TUQexU4txgYsxlYMcI6vLhSZ6Bca2DZMAotLoHArT0mbhswm+Lfz4Ghy/gZ7PrYPFQ1+hIQ3JAtkAHmaZRvCywfZpzox4VSMOo+gxMw03bjDu7Meag3is6n6AssD+n9C7BsCODkk/9qu17PCc71bTyuVwXm8rgHPzthqMURlUv+2xU2gnYFT46DK5AWyXJF0zbdt/fzOur0Od15j4KLv9HlYSAn3XbJDwA4eADeldgZ78hswLMMnTIGmk6maJ4rcY9QrRdLtcq2ohAVtp+ZWUH1gPMr6DgyEHuFzpsBfwLLR9ABKJPRjeToIS0mlyRcAvhZy2TsTnV7v3FbzGW+9z5zgKWDKdw5eLBFO0BRXi6mGAKBQFCYiIwcQbElPM9R4+/26DfePi5K3BdCjqCk4RoADF1qLtgAnMwMXsigTQOOnhRwGk4A9n9nFHFMOTjDmBNSUKRFAdnJwP7/SU+EjvzC85TJOVkKqGUu4hhQp1CYqdzVet
uNnVzhV7kBOz8E7B2YVWOJNjdvNfw6MHiReTec43OZc2PI5Gg8Edj9ibRbY8/nQINx5o/VGsTAY0cvijgABac7B9mtpuH4/IWcuFCGf257h91xjs4G5jThe8uV+F0JCgb3ENuht0H12XVMUDioU4GdH5mLOAbOLmJXu0fFLYjOD9Pf6c3ddMPZos4wXqtH5wA1+jBA3pQjv+SNFxaLRH7VgbrD2cmq0QQ+ps1hRyavCuYijoHMROD0P+xeBQDl27F0q9lLDxYVizt2KoY6q1zNRRwDadHMnaqa13q8fBsg/Cj/XXuo9feYQCAQCAoUIZcLii3hiZlQ2cnh7vjoq1reLipEJAkhR1AC8asGTNrPFc+4UE4u/KpJl4OUbsaV6T2fSR9Lrwfun8i/JfijEnmOocWRX9reJ/oinUXOPsYbe0tkMopMzV8Bzi9jqUJgHa7kxl4Bbu0FSjdhmVXsVevsIIUSCKwNeJSlkyl0C0utwo9xFdmzDCBTAJ2/4Eq5s68xmNSS7DTzsqfaQxmanB7LcOOQxhSw3IKB2FC6jvLrBpaRAGyYxkygkCb8f9R5vod1k4EXT/L8BAWPayAwbDk7jpkKjS5+QP95ojVyYaJOBm7utL09dBuvq0fFvwaztaIvsqyxdFPmaLV8Azj0g/m+FTqw1by9A6BOZBmoziLgOvwYIFcCg/6i4JMex9fQaYHj8+kacvQE2n8MBNVh5piUiGMg/hrQ9Tsg+jIFnBu7OJaVdBJvATFXjEK2FHePcIyt2JljsE5DR2bbdx5cKiwQCASCJ0IIOYJiy73ETPi5qR6p9bgBXxcVzoSLVrOCEop7MP+r3CX//Vz8jG3KbVEQ9nathqUN6bEURewfUO4okwPQG0NPAa7uGkqfag1k/s39U0DYXqDfPL7f0K2cDDadxtVyJx9m7Zi6jWQyoMVrLL26d5IiiU4L1BiQ1/JWBlTpRuFErqBjR6cBes+xPg9TlC5AkylcXc5KZtlAuVZ55RIKoEI7Ph5/DfAonf8kJSsRqNYHaFGeXbrsHIA27wLXtzM7Iy5UCDmFhVwBlGoITDtOF0HsVYppwQ1FNk6RIANgI6PocYN/ZTI643yrANps4PxSwKsi3TNVewCX1/CaLtsKSLnHznY9ZzJHy1aGVU4qsGIkyyU9ylIw1+UC3hWAsi3ognQPAWKuAt7lOQaYYqeiKNXqDSDhJv/WWr8JeJRhV738WrKXFPQ6OpJkJuZ9mYzB9fZOQJcvWap4/xTLsOqNAurllY8WRbdEgUAgeM4RQo6g2BKekAFfl8fLM/B2USI6RQ2tTg+FXASMCp5hnH1YMhJ5xviYezDQcjpLteydGcTp7Pvok4u0GLZET49jK+6cTDpt7p0AghvT7WOJTMYwY9+qdNXUGsROU9lpXOXOTKQgtHSweZlTcCO6c1aNBo7N4Qpv58+ALV8D7T+kIKXTMMQ06Q5zOAwcmMHXqdYLKNuMEysHN04yqvej48fJCxixGshM4HnEXGb74OxUupr0egZ03toL9PiRr7XUpCvVgf9x0lipMzCnKTDoH8CzNODgSQeAwmTCKJOzBGv3J+afTYvX2L1HqpuWoOBQ2FEo8xz9tM/k+cLRkyJq6Bbp7VVstCh/EOo0dkha3J/h5gZUbiw5rdKd5aUXVgLupTheLOhKZ96w5cyq0eaaH1Oj5piQEQf0+IFdlhJuGbfv/RoYsgQIacicnRp92WGv5WsUcHIyKOjeP8mW5QZB3T2YbdafBSHHuxJFe6+yHOtbvs7vm5xMju839wB7vzC6JeUKlqwF1Hqqpy0QCATPCyIjR1BsCU/M+q+V+KPi46KCRqdHbNoTZlFoc4H4myLTQlB8cfJmq29DgK5HGaDXT8xkWT4cWNQHmN0Q2PQaMw0elvgbwN/dgbktgcX9gCUDgQvL+XqBtSmoKCUyIJq9zM4mK0fRMXTnEEOL104Clg3NCyb+yDqr5v5JulfKt+fPjm7A0d9o6z+/DGj7HlfJXQM5obIkLZqi1T+9uNK+bBgwrxXLpAYvBLa/w/ewbjJFpOvbgIELOBns9Dlw6Eeea0YsTQXH51q/RugWijDeFYDVYxmQPKcpH8/JMO535yBzPCw5PIvlcP6PUV4iEBR3VK5Ax8+lM4oaTwJcgx7vuBkxwPop5iIOQBF2/VS6ZdQp/LnFaxzrctK5z7klQLsPrI958k+g189AneH8t6mIA9CJsmo0ReFKndhufORaCr1LBzPo/Y8O7Mw34E/je065D2x4+cFOyZKATMauVFoNO5Id/JGhzmsnAvNbc2zu+5vRsaPTAitHswxVIBAIBIWOEHIExRK9Xo+I5Cz4PKYjx/C8yOQnEGBir3ICPLsBMKsmb+AEguKIbzVgyiGg+atAl6+Bja9Yd766sg44OFM6FNmS1EiufifcNH/82lbgxHx2kdn9CQWSRi/QEVS5K4URhT1w5h+KoP9+QaHEkCcT0oTlRbY4txSo2Z//rtIDuLiS/768jtkYQ5awY5YlMhnQ4lVgxXDzHBxtDnD3MDvWRJw2f869E8Dhn4AJu5ilkpsF1B7MydrJ3/M5x2VAjf58fwk3KVatHmf8vNNjKaLZInSreWi1QPAs4V2BGV9t3wdK1QcqdgJGbWD3usdtxZ2ZRAedFGlRFHBq9OfYY+9oLvhc3cROSkMW07EXVJ/d41q/xXGu0QTbY1JuFp+76xO6bw7+wPHElKhzHOdavWF87O4h5mSVdLJTKejbO3CMS75r3KbXA5fXApFnzZ1WOg3bkAsEAoGg0BGlVYJiSVJmLrJytfB1fczSKmeWOUQmZ6FBmce4ecxOA5YMolW4wyfA1c1chRu3HQhu8FjnJBA8ETmZdItkJhjzY1z9uU2dzFiKeiO4ctzje64wH/nZvGvVmb+BZtMenM+SdJets6U4twRoMpmlXEsGcgW8xavMutn6JoUUt1IMBPYoTZFl2DKev4M7cPRX26+rTmZmxdCl3HfgAjptjvzC9uP1RjFMtP5oTi5u7mbmTKmGDN3UabnyX7EDXXR2Kjp4Ds6Qfr07BzlZKdOCYpizN0NQM/NZTc9KNLqfMpP4b72e76vHTJ5DfqvxmfG2czsEgpKOTMbxpeV05mCp864FjZrODoUdr4+MeLpoHNwfXPb5IPE5O43lPB5lzJ1xABBYFwhpRNdIg3HsgnVpDb/PSzWguKzXmT/H3gloOpUh5/bOwMA/eS/QcBzHWJ2GY4tOy+OeWwL4VDI/xrNQPpmr5u/G2Y9lqXKF0f2k01DEObWApWym5XSpUQyFN5S0abN5HNdA81B5gUAgEDwRQsgRFEvuJ/Em6HGFHCelAo72CkSlPGbnqsM/0wHQZw5zRgLrAjveYznFtOOA0unxjisQPA4Z8XTCHJpJlwnAle/Bi1nOEHmGnVLOLzXmFQTUAvrOBTa9Qrs/wAmR5iGuiZR7trdpcxj661GaYk9qBMN7r23ldq/yQPcZwI73gbhrfExhT4GlfDsgqC5w2saxe/8KhO2jAGUIJPYqT2HKoyywbiIQkZcFJJPTvdNvHl0uyeH897WtL
KsyTM6CG7L0Yd0kfo6WZCaw5MtA0xeZgyPVbhdg3k/0Rf67VH1mRrgFUUhLCeekp1wb6ZbrAFCtt3mbdIHgWSMnkyLp+ilGh5yjJ79PA2oCm183Lz2s1BnoOYv5NlI4etFpkysxdsnkvP4izlBg9q1KsUCvByp2pJi0432jS0flBrT/gEHIMZeZveUebBwj7Z3Yzer4fDpwDFToQHEqJ53HM92/xauAg4dRuHD05HEzEkp2lzQHN+YH3T0CeIQAtw+wvPa/75ja3G6ZP+RdAfizE383HT4yhtoP+JPimEII2QKBQFAQiNIqQbHkfl7r8McNO5bJZPBxUT5eaVVOBnD8N5Z2uAbwMYU90OJ1ugNMb+4EgsJGrwdCNzPM0yDiAHTc/N2df5P3jgNnF5m36I6+CGx+zTwfQunCFeYH4VXe9jZ7JwB6oNWb/Dk5nJMnA+0/Ym6FQcQBeKN/9Ffm4HiEWK9eA0BQPa7yHphh3lUqMQxYPYFhpwYRB6BQc3E1cPsgz7daL04WLq0xX2G/fwrY+QG7Rlkik5l3ZAGAY7/yXKTKn+wdgTrDgCvruZLv4s8yspWjga3TWWqQlcQyEimxxi0IqNDW+nGB4Fki6Q7FUdMyx6wkCs6bXrPOj7qxk2OVlJMtPQawc6SIIkXd4RRO9n4FbH2L5Zh1hlNIaTKFY5FpqVV2KrDtHYo8Tafyu920LKrZNOD4PDoATbm1h9lZq8cZRRyAzpt93/A83UP4WKs3eT6JYXQhlVQcvSnKRZ3jf+eWWHzHXKAo5+JvfKxUAwrm2al0WG55A6jWkyLcon5Akg2np0AgEAgeGSHkCIol95My4WivgLPqMduVgp2rIpMfw5FzaQ2Qnc6bD1PcSwHV+wJHfwFSIh77vASCRyItCtj3rfS2rCSKEaf/kd6eHM4JjYMHf272olGczA9nX7b6laL+KJZ1Obgy6DIzgS4VeyceOzvNvJzLlJN/cP8u33Cl3CB2BNVn8OjB76Wfp05m228pAejCcqBsS8CvGrvWSBF7ledmuRJcuSsDji3Z+CIweiNQtZexZXK51swEOvAjW+x2n8Fg5dir3J4Rz9/Tns/YDeeF3UCZltwmtwNqDgLGbjVO9gSCZxGNmqKtZbkSQHecpUBi4MZOa8dcZiKw/QMgLQKoNZAh7m55rh1nX4rUzV7i9WUoZQrdCjScwHDe0C0Uh6U48w+gyWGGTm4Wuy35VGL3vFv/Wu9fqgFderZKpvb/j8LRgD+5GBS6mUJHho2xsCSQm8EFhJoDpAPmAebmpNzj+NpkCkUxy259ZxYBNQdyIeL8MkAn8bchEAgEgkdGlFYJiiURSVnwcVFC9gT11F7OqscTci6sAgLrmK8yGag5kMGIB78Hes587HMTCP5Dk80VY72OjhknL/Pt2hzbXUCULnyeoUOLFMl36QSpPZgTHIW9+fasZK6eyuTsSGXvyH93+YbumKhzvJGv0BFw8eVzLq3lCnlGDK3yMgWFj8OzgESL7i+mZCZQTFk+HKjeh6VfcgXLppTOtnN5AHaPcwtm+KYpmmx+ZpmJ5o4lq9dO5Mp9egzfX/XevJ7XTLDeN/E2kJUCtH0XaDCan7HcntlEXb5gO+SlQ6UndVfWA23fYdnB0MWAOu+zdfQCVA/hhhIISjI5mUDMJeltD+r+qE4x/zkzkQ4+R09g16d02ozewOtRJgOiLlBM7TmLYccxF+mG0WZTlDm7yPZrxV8HshKAKxuAPr9yXGz7fp7jUAL3EI5Btki4QbFH6QL4VQfsVXQmmTpYShp6LZ2QcoV19pAp6THA8FUUfv7uYf6e7Z34eVTtwe8RlQvLcR09+W+BQCAQPDZCyBEUSyKSs+D9mGVVBrydlTgX/ogtQNPj2HGi6TTp7Uon5nKcWQi0fJ05IQLB45ISwTyYMwspCpRqCHT7FvCvSUEFoPDhGmDdOtyrPND9e95gK51t32gH1gEC6tB5YlpGpM1hIOWO97jNTgXUHga0fpOvufMDoO5IZiAcnsV8Gp2G+S/tPgCizgA+VQCnEJ5b5Bk61iyFIlMcPfk+tTnAxVX8T65g7pReZ55VYYl3BeuOMYbPR5vD4xoyKqTwCAF6/0LHgELFcrCFvc3LuAy4BvLzOLOQn1/yHa64AxTD6o0C0iJNjl2agpY2lxk/90/lhYT6SrdiFgieVZR5E/eoc9bbDGOaLRzcjf/W5NBxeP8kyxkbjKGj78YOlptW6szcLSdv4Po2lnTGXaUjLu4aHX++VaXHDADwrgSUb0/X3KEfgSbTgIhTHN+kSI0AytvYBgBeFZgL5OTLnC6/6kCDsfmPh8UdmYIB0nodBRlbbiQHD2Dv10CtwUYRJ6QxUKoRUHsQcPgXYEEXCkMV2vP+6tY+hkd7V3jweRhCsgUCgUBghiitEhRLIpKz/us89bh4uyiRmJkLde4jrIjd2MmbxJCmtvep0oM3NYd/eqLzEzznpEWxc8rxucYb5IhTwJ+dzVvtugYCrd+2fn637xjge+pPCgtSuAdzVXvpIODeUfOb4fibwB8djKVFmmx2tfqnF6DLBZpMBbzKsnvbhZUUKfR6ChULe/Ocr26i+LnhJWDb28DaF3gsZx/p82k0wbz8qXJXYOQ64Oxi5su0ekv6eSo3oHxbrqJbUnswILMDoi/TYSOFT2Wuji8dzDybDVM5Qew1y3w/e0eWb3T6nOJVZjw/s3JtgBp5x67Z39h1SuXG8rKW0znxtFPRHeBTCdjzVf7dqwSCZxE7B6D5S9bZUwDF3nJtpZ9XoYP5uJEYBvzTgwsreh2dc9e2UijQ6+iMXTMB6PI1EFCXLa+XDzdmc11ey7bYhtJIS5q/DCzsRXet0gXYOI1CROQ5aTHn/km2UrclRrV+Cwjbz/Gi5gB27trz+YM7bhVn7ByZ93V5HUUpKdxDAK9yHPu8y7Oj4IhV/AyT7wDnVwB1h7GToF4P3NwDrBwDVGwP/NWNHRKlyMngQsPOj4BVoxk+nXSXxxAIBAIBACHkCIopkclq+DxmxyoD3s58fkzqI9xI3drDSVh+rVDtHdl55uwi21kgAsGDiA2VLkHQ69gVxRAUKpOxDKjF68bV3bItgfCjzJQI3UzXSO0h5pMn36rMndn7FX9u9rLRHZKdBvz7pXQpUtJtTlqq9wWS73El2pLcLIovMZeZj3Bzl3Hbv5+zZMo0MFmuABpPZIiwIX+iTHNOeBb3o2ii11EgaTrNfBXbPQToP5+5VaYTLJmM5VkVO7I0zcUHqNyF16ZpSWZgHaDrtwwkNVBnGHBhGcW0OsONj/f6GTi/HFg7kSVSl9YAaycBR2cD7d4FOn/JSV9OBlC2NdB3DnDsNwa1Xt1IkWr1OODsEsCjlBgfBM8nnuWAIYuN2VwARU//mkCfX6yFkvLt6Jb7b3xKp8NDk03XzLkl0qJoVhIXXyq0Bf79wnybNpcOnj6/mperKl0ott45xFLPO4cohMffYKZNUD12oSrX2vx41Xrx/Qz4wzxnzM6BbiCZnOOPygVYM56iePU+QFoMu1eVRHQ5QNlWQKUuQK1B/M/0
O8avGjBsGctH649i7lDdkcDq8SzLDd3CxhHLhnBcLt2Mz8tJZ1B9mRbAhRWA1mKxLVfN3+tvTelYDd0CbHsLmN+GHRIFAoFAAECUVgmKIRnZGqRk5cLnCUurvPIcPVEpapTxfohsCp2Ok8yKnR68b9WeXPE79hvQ8ZMH7y8QWHJjp+1t945TLDBMQJx9gTZvc1U0M44Tig155X96PbDhRaDheN5U56pZamWnogiRFgXUHwuUbsrcF5ULJ0C22msDwOX1vHk3FWgsuXOIk6TwY+aPJ9wCtkwHmr/CgHClC907dw7x3CYf4ETN2Qf4vb3Riu9XnW6Z2oMZKqzJoaCTEccyrwod8kqjcrjKrXSmG+nwT9zm4A7EXmGZ07AVnHCoXPlZrp3ISRvAluFlmgPH5rAsq/3HQI1+LNGIOkuBzJJr24C6o1hadmMH4BJAcejSKk7+LDm7CBi4gK4nW6HRAkFJQKMGcrNZMvWwZUJKJ44fUw8B6fEA9BzDXAN4jEF/U4RWp/C6dfbhtahOZcvr7FTm3PSbS1Fom4Qj0cCdQxRmDaWncgXLrrwqMGj4zCJgxBqK1jkZfC1NNkX0plPp8lC5Aa3f4bVfuQfgX42OR20ukJMF2DtwAWfTqzzn9h9xsUer4fu5uIrCj18N4N/PeB73TwEJNwE9OAaWxDbk2lz+F9wAmN8OqDfC+B1jp6TLcc/neYsMdgyB3/42FwpM0WmB7e8ygNowvt49DDQYx4yiRi+Yi23pMfzusnTfZCUBG18Ghq+0zpITCASC5xAh5AiKHVEpDCj2KYDSKtPjPZC4q7xRCKzz4H1VLiwLOfkHs3Ic3J7gTAXPJVLtrQ2oXAG5hWFS6cRSJ6+ynJA4mtzI6nX8Wzz5B38ObsS/y9pDOMnJiAd2fsy8gvgbedktHrZDkl38ObFyslEiBXD13N6JK9KWJIcDW9+kmDJ4ITs7BdTihCcrCajUFShVz/z1Ndmc8FxYKd19qvYwCkAHvwdS7wPBTYAK7Zip4+gB7PqI5Q0Btems0+uABuPpAqg7gq9VuinDWA2TBEdv5kAoXdn55vTftt/v6b9ZjnFzJ1eWtTlcVbbFtW1A81dtbxcIijPZaSxvOjoHSAoDghtz4u1RBrB7CEFHYUc3nVSXNicv40Q8LYaT+pN/UDRoOAEIrg80ngyc/otjmamz57/j5+VhOXry2pXJWBLd+i06OO6fYDh6s2nserd2IkPKJ+wCUiP53nKz+D1upwTKtgBOLQCO/kSnSPW+wN5vgKZTgL+6sFzIIEJseNH6fMq3BcJPAFW6M+tsz6fApdUUL27t4bitLGFh5woVx9yk2xTjT/zO/yxp9QbFdic/oMlkOqz2f2e+T3YaS3btHfm5O3gwHNnRg69jSuwV28H190/SrSqEHIFAIBBCjqD4EZHMUiiDEPO4qOwUcHWwQ2TyQ5ZW3T3C1byHXUGv1pvlFGf+4QRPIHgUKncDdn8qva3BWMDZQuhJj6M7JSuJQkz7j4DbB6QDKFu8yi4hwY3YtSo9Cmj5CrBiJB0zZZoDdYYCB2y0+64/iiVT1XrR+i5F44m8CfcsQ/ePVGhw5S68IU+5ZyzxAijsdLVoqX5lA4WnU39Kv17FDsC8lsa2xvdOACd/Bwb+CajcKcjs/x8nCqUacmK39W2gcmdO6uwceb2atjiuP4oTrtpDAJcanGBY4luVn6ezLwA9yzD0ekCrBjT5iMS5mdKd76TQ5tJRkBbN9+cWyOfamUxwdFq6q9Jj6EpyC+I5KW102REIHpdcNa+V9Sah/4brbcxmBtkWBGnRwLopQNhe/iyTAx0/Y9mmYWyKOA20e48OQoWS7cbLNKMzRunCDDGVO0skq3RjTs5/uTQn6Jzt+RMwaDHg4g3s+pgiq4Hwo/wO7/otcHUDr+17JyhYDPqH7iBAemwAWELa4jXAuyKdPSpXChat32arbQcPCjkNxhTMZ1aUyGQUvh+U9ZWTye8sB3f+7VTowM9l3WTz/TRqdv9DFr9/Ti2g88mym5+tUGUDtlrKCwQCwXOGEHIExY6o5CzIAHg+oSMHYHlVdMpDCjnhRwHvytIOAymcfVjrf3QO0GRKye5OIXg6dPqCThJTAusyUFuba/ybSroDrBhlXsZToQNb8f7TyzxQs9ZgIKQJ81n2fA6cW8wVUr/qFHEAipaNJnLl2bKrS8dPWQKx5S2GWDaZwkBmUyp2oBtn71c8Rs9ZtLyb3mB7lgMaTaKQc2ah+fOzU3n9yBXG0qrr24AhSxj4HHXeuK9MBvSYyXMwiDgGNGpm/bR6E+g5k1k1uVnsHgNwclWpEyeKhmweAy1eo1BSrSdw+yDgVoolk4dnGfcJqg+0fgPY8oZ56UbjyVyxr9yVk04pKnTIvx26gZx0BoBueNFYkmDvxG5hVXvS7afJAe4dA1aNMWYnKZTsHlZ/tFidFhQs6dHA5tetH9dkA+unAmO3Aq4PKVLmR+Q5o4gD5JU164CDPxgfS40AspLpLKzSnePZoR+N232rcvxp9QbDzKXChbe/DUzcy1JHUxHHQGIYz6NCB+Dmbj6Wkw7s+xro9j86fjLiOKYl3TZ57SrMzdryBl2IAMWo2oMpOGlyAOi56GPakaukkJPOFu1SrioDlbvR0bTnc+sxstv/jGVxcgXgW41jf5XuFNxDGgOe5a2PGVDb9ut5lMk/w1AgEAieI4SQIyh2RKao4emkhJ1laclj4OWsROTDllaFH6N9/FGo0Y+rbVc2ALVsdMwRCKS4sBJICc+z7B/nKm6ZFpy0LB8GTDlEt0V6LLB8hHUw8q09tOpPOUwLv1ZDUcI9hJP/y+u5IqzXMxD42G/mz18/Fej0GfMJ7p1guWBwI5Yj5KqB0I0UWZpOBYav4PWhyQVKN+akZdMrFFYaTqBQM2wZEHmW51uuDW/cT8xjeLgUVzeyfbDhvHRalj90/opC0s3dLB+r1hNIjTJ3BqlcmYWjTqG7x6scEHEemHaCYZj3z7A8Q6cBTsznpKL+aIpW9k78nG/s5Op9h0+Aiu04UavUicGqGXF8nTZvszOOaWt3nZb5Ol7lmQ1xdRPPwxS/6vw8Yy4CHsH5/x0k3qZAY5oHkZvJ388Llfg7SbkHLO5v3lpdmwPs/oSTySrd8n8NgeBRSLgl7bADmPuSlfjkQk5uJh0+ptTsz7HQMhtl10fAwL/Yqer6DvNtcaHMCxuymOcmhSab1+iF5bbP58oGOu8MQg5ANxDAx0/+AbR7n50CDefX7gO6TgziKsAx8fxyjsONJzKDp3IX269bnIk4w3G63kjm35y1EORlMrql/upuXiZrGCM7fQ4EN2ReUONJ/F4Zv4O/D4WS4/vNnVy4cC9lfL6LH/c/Md/69Xr8aB42LRAIBM8xQsgRFDuikrPg5Vww7hYvJyWiHqa0Ki2GK3+PGkzqWZYOiqO/CiFH8GjotcyMCT8OdPuWN7kRpyhQ9PiRq8AAhRGp7lYAELqJDpq27/LntBiupl9cCUAPTPwXiLnCjAgpN8u2d2j9bzSBN9bLhwNjt1AIMux/7DdOYkasBo7
8Apz9xzzMcttbQPcfeMMff4OhwSn3KC5U7Eg3kRRnF1NEGfAHA4uTwwH/GlxtVacwvDnhFnD7MODiy+eoXPl+nbyZo+DsSyeN0gUo3YjijEIJ1OzL46lT6ChaP5WTvIBaFECO/mp0D8lkwJWNXNEHgJFrmIcTe5Xvx1TEMeXQj+weNuhvCmY3dnGVueYAlq6tm8ow6PzQqOnos9VS9+APwIC/mMVjKuKYsvdrij22Wr4LShaZCcxwCT9GMTCkCcvsijJfxXKssNr+GC2g06LpfIk6z+9Nv2oc60yRK6RfW6/neHhRIjsL4HEzE+mmtdXuW26X//vSaVmeKUW51nSJ3DsJDF0GHJ8HpEVyXDUVcUw58Ttzcxw88s9DK87otWz57lOFpWGBtVn6mhrF+56WrwF3j9rOWjs+F+jyDYW/rGRg5/vMDLp9gGN3cCOGWquT+TcfcZplpYF12fY8pDHHQMPrdfiIfzcCgUAgACCEHEExJCpFDS/nJ+tYZcDbRYWz4Q+o7waMK28+lR/9Rar2AvZ+wdWrUvUf/fmC55PqvYHzS4Fes+huSblv3ObiB4zeyJXHjHxaWOv1xpvolEjg2GyKFKY0f4V5OVW6GcOQTVEnc0J18AeKIG5BLAPo8CngHswJ0P0TDL20d6TIpFCyzOvMQoot66cB/X8HslM46dTlUlS69S/Q7EW275bi7iEAecGiLv4MSHXypXD1W3PjxGvEKmbcDPgD2PctnT8G7B2BYctZMnFinnGS2fxlnueuj4F+v9H6f+eQ9Tmk3Adq9Adu7eV5NhgDVO/H0itb5w1w4qHXA9vfA4LqUmDS5gChW/k8mRzwr277+QDFu/hrtrcn3OTEVKozloGk25wMCUo+6THAtneZ62JAruDkt1pvCjtFgXclXvdSWSQepQEnz0c7XnI4sHgAEH/d+JiDO118944DMZf5WOhmoPEU6WPodbbFTICfnX8N43e5KXI7uj8qdaaDToqqPYCwfeaPBdSii2/Xx3TgtXmHmTyNJtIFaOtYAMfVnAxmneXmOVBKGkH1KHQfnglcXsPvkj6/8r3YOQCX1hjLqaRIjWTp29oJQPQlZghFnTfmpTn5AmM3A+unWI/pI1YD1foC5dpyXFU6i5IqgUAgsODJa1cEggImMjkLXk8YdGzAy1mJxMxcqHO1+e8YcZrdL5x9H/1Fghty4n1qweOdpOD5xKMM0PV/DDw2FXGAvHKq4bxJdg20fQy5gjfHAMUPSxEHAI78TAdNjX4UaSwJbsTjJIcz08DFn5OQxDDmtqweCyTf40qosy9zaFaNoWOg9y9AhfYM/nUNoHCzsA+woCsFiBs7mcdTrbf166pcgabTgMM/A/9+AWx8ibkcMtDlY7p6fmktX+vSGvMbfoCZOMuG0gkz4E/jhOnIL5zYyWTM72kpkfkBsE2xiy8nlF2+4gr0wl7AqrEskbKFZzmGD7d5h46ZTa8CW99i1oZeD7T7yDqw2hJ7JyAgny55fjU5qcmv5NO3CkUuQclGr6cwYCriAHSKrJ9qPUYUJi6+LIuxRK7gdZjfmGSJOpUZMqYiDkC33IqRDG03cHUzBYJGE6yPI5Pnn1/nVoqisVzCVdP6TSDxFp8vFdTsGsA8qusm+TkKJa/tk3nh64lhLKO69S+gdOT4F5hPlouzL+ASANzeZwxMLmnYuxg77yWH01lz/xRdmsl32TUwPxezZ1n+3kOa8mffynyegUod2YVQakxfMpCuJxdfll0JEUcgEAiseGpCzunTp7F48WIsXrwYZ86ceVqnIShm6PV6RKWo4V0AQccA/jtOTOoDyquizjEYVSZ79BeRKxjSeGmNecmJQJAfzj7Mj7HltkgMY1aLs6/xRtiS2sMoImYkAMfyKeM5PpfCypAldKr4VgEC69D23vI14Np2YPAi3phnxAF/92QnF42aE8wqXTnpOrXAWGp09zAfazKZYaNrxtPxY3AIXVnPzIt937A0occPFI28K7It+LjtFCDKNAe8K3C1fNRG4Nwy6/M/vwzwCGHujxS5WbyGr+0A2r5nfPziKnbeyojPazNuUZ7iVZ5ZFte2c0V+1Rg66wAg8gwnEY423Adt32XQ8qU1LGGrOZDvo3xbYMwmoOHYBzso7JRsb2woozNFJgNaTQfsHejesrfRnarDJyLs+FkgPZbioy3OLy26c1E6A3VHsMyyQnv+XdcaDEw+aHssskVGPHBzl/S2zET+Xff+le4X74pcVGn2MjtGlWnBx+qNpCjbUELgAQD/mpz0x19nBkv1vsZrccAfdL7tyyvxaTSRIlVgHTpwm70EDFsJyOyAkGZ8vTrD6fI7Ps9ceAA43h74nu3EPcvaFrUaT+I1fG0H3UIlkcx4oHQzlpO1epOi4sHvOVYuHUz3WPk2tsfIptOAAzP4vQIwq+ycyd9xlW7MJpIiN8s89F4gEAgEVhR5aVVsbCyGDh2Kffv2wcPDAwCQnJyMdu3aYfny5fD1fQxHhOCZIVWtQVauFl4FJOQYjhOVokYZbxsZA3o9u2dU7Pj4L1SxI29QrmwE6o14/OMIni8e1NUoJ4OCz8A/WcKUlcROVtd3sOyhw0ecdGUmGQN6pciIB+RK/o1mJrDVroMHW8Fq1BRaNr0CvHiCYZ+mkxfXQLpjYq9YH1enYcZLpy+tb7rPLgb6z+f1te1tOl/avkvxSq5k0HP4UbbzlttxpfvCCrZKlyI9Nv/PKycDCGlEh5xXBU6iIk4bnT0yOdDxc8AtgMHQSida+5XOdBvt/846+2PP53wPOz8E4vJKoJTOQOu3OPFs9QZXi+1dgICazPUJqMWMoYfFowwwYg2wfrKxTMHZh+U03hX5s3sISxBWjzdmDjm4U4jLz9EjKDnoNPwbt0VyOP8+H2ex4XFw9GAGVEBtjhFKl8drdW8Qg22RlQjUH8lJvV5HUVKuANxL0z2j01LMtndg6LBGTZHZUPZVpjnLPTe+TJF6zQs87x4zWRK6433j51q1Jx2MgXWA0s05JqRFAdnJgIMr0P5DuhavbWPgvCHwuUIHoP4oisFuQRRpbv4LlG/NUqPdnxoFeYOjSOnM3D1t9oPbdxdn1k1iRk7fOSz1LNOcv5Nqfege2/4hx68NU41jpL0T3VFZSfxclC5Ar585RqdGmB8/vzHdcl+BQCAQmFHkQs7LL7+MtLQ0XL58GdWqMbTsypUrGDNmDF555RUsWyaxGit4bjA4Z7wLKCPHIOTk24I8LYorT94VHv+FnH15w3t+uRByBA+Pkzct/FI3szI5y5xSIhhKfG2LMfSz7gh2TDF073DyAko3NWZNWFK6KVefq/fhxGL3p0bhReXK7IMBf/L5VzYYV6uzEgGFioGWtrhzEMiRKB3QaYC1k4AGY4GpR3jeR35meYJOwwlp5e5ApS4MeS7flmVEsZcZHGyKoyczKdyDpUtM3ILoitv8GrD1TT5m78SV/ardAU0WwzK3vW10Bzh4MqPDsyzFH0NWjel7D9sPbJ7OSYzSlcdx8uHK/+/tjcGqZVuyRETpYhRxdDq+R5mM71dux9+fXg+Ydu
[Encoded binary image data omitted]
NpsswMthntbYxssEBocxkUz79TRJQBD8oAe/8vIubcuV5iYq/9ToSXDe9K2lN8TxEzkuqkiYx511vEOZ7c/eIDEdlevCg+HSal9hqdRJjvnC2l6WXZIgqV54rZ4/Ezom2vEi+GH26UmdSiFJkBLjgsA/XYrvUTt+qStELMoBN6+14fGAdqne+KneTVUJGL5/f/4bljHcU3rObVrI40ihHfgphAI3anm9bRiheMwt+XcIueG/r6Nvg16zUn9Yban1XK5W+t4ZM1R9iVXsLivTlc/f56vt+SRoXt1AWNvDIb//5xN7d+tZUNSQVsTyvmmZ/3cc0H60kv8hGJbSutNaf1Re4+MT62FktqVeN+cNsq8Q2bNEsqCxOXiNluSWrD+ylKkRS9yLb1E5fqkrFVUp20hvoJSxc65XnSQtYQaX9I+4/qBLcShclweLn8P7YbHFnT8LZHVkPrUVK5efBXMexviC5TYPdc72Vl2VLFmbZJ/j575sokgAq45Gmp9tkzV8T+vAPymJS10OyShqtyet8p/jsNkbuv4XUn49DShtelb7r4Pi8KCgrnnHMm5JSWllJSUoLH46GsrIzS0tJjP0VFRfz6669ERFxEJacKZ4Ts0moh51zEjxengn84YX4qsso9nBefb0uM3NCmnGAwo6BwqpjDQG8SIccUJoLJ4CdFcPGPhDHv1frPWKJFnAltIYNZrU62z9wu67Z8Do36QEIfEUuaDJLZa0O1D0xEGxko604wE6/WyIB4+AsSqWqJEnPv7jfJ8pCmYgzZZACUZ0n5fc5eGP2uPLaGTpOkPasuTqu0eYGUutccly8MATJ4v/Itec7j37OJX8Nv/9fAYy3gsKI6+AsUH8Vv20dMbVpOywAHfZqG8PK4jrSJCiA6UEkUUfj7otNomNqnMSPaeSeyBZl0fH1LrxN+/gvKbTw2dzdWR/0Jkxd+3U9+uQ/fqgZIyi1n6b76HlqZJVa+WJ+C3enyXqFSn7hVxmCprTC0lUr1XuERMX//dpL4qbhskp6kO4GfjUol5yyH9cQ+KVpDrYktF1lqlVolLUoNUZOq5HY1vI1aW/v+OKpOft522cRI+uoPpWpz8OP1haIuU6SCp+hI/X1ojqsic9mllStnn/wfpFLHUH0tcTslsXDES/UrW9uMEt+1mnQzn8fcwOuxV0rk+YrnpX1q21dQnOa9jSmk4f1qTyKQKSgoKHAOPXKCgoJQqVSoVCpatmxZb71KpeK//23AsFPhb0t2SRUBRi167Vm+YLnschMa35twnQqbC/KqPESYzvHASqUSIz9FyFE4E5RmSbXN5o9rl+2ZKzOf1/8opsAuh0SZDnoUFv5LZixr2DBDjBUzd0or05InpSWpJE1K0VsMh46TYM3/pDpn52zod5/cwPgavLceLXHj9jIpOZ/wlRiH1k142fwJXPYsZO4Qs9C9P0qK1M3LpaLHZZcBteu4FgxHpQhOxkCpuBn9Duw6zrC0hm7TYM3r0Li/HENFrghGwU0hKA6cdvkOqrVipqw1QvZOKWXvNOlYO4UqcQmGwmRiN79PSI+7eX/c/dh0FsL9DajVF9lNmYLCaRIRYOSlcR156DIbyfkVBPnpiA8xERVgPOHnv7jKwb6sUp/r3B7Yk1FKo9CTm/663R5mb264Kmbetgxu7t+EqBpRyVYOJRnQbGhtFUhdNHrxByvNqF3WbqxUf9SgUkkb6qJHxThdo68VAOrSdKhUCWbtgJH/E98dX3SYIKlHTQaKx9fFhN4ilZ4rnvW9vv14SZ+qEdh9EdsdItvAimfkbzL6Hdg9x/e2HSfKNSl9s7Q7XfW++Khd+x3kHgC3Q6pn9s7zndKY0EeuK8eTvVuMjWuoKhLRSG8WQWjPXBGZJs2Sqk+XXT5DfsHy2uJ7+26B0ptlYuR4HFZJPft+qoh4INc5cxjcuEhag0E8m3y9DpB2PPPJPagUFBT+2ZwzuXflypUsX74cj8fDDz/8wIoVK479rF27ltTUVP7973+fq8NRuEDILrUS6n+SBJ0zQUmaXFD9Iwj3k499xvnyyYnqIEkKik+Owl+lMMlbxKkhYyscXAwR7eXGptUVoPWrX50C4g/TdKAIJe3H1cb2XvsdLH5UqnLiesjMsloraSHDX6w/6x3cGAY/Bhvfl0Fxx0mw5TNvEefYcz4tgkkNB36BT4ZIVK1/pFQE+WLLZzB+plTnZO8W49LjSegjg/1ds2HhvSIq6S0yIK/MF2+do+tkP5O+Ec+fkCYyIzvuU2g6pPYmUK2RGxXAb/MMAqoyiDzJTayCwt+JIJOeFpEWhreLolfTUGKC/E7++T9JsevpVMO6T7CtrKtzLOU5cvPc+y5pO62LSi3pQ3XPl2EtJHloXx0fnC7TRMTO3gWbPoIr3/CuFgTZd5+7xdOl8QCp3BnhI10prIWcU+N7yXlHpam/zYWMzk8E8OEv1F8X3koqQZ12Oce2uKz+Nu3Hiz9O3gHZh61UfNq631x/24Q+InCkb5bfy3Nh7ZtSEfTNNeJ5s/9ncNikdff46hn/CLjiVd/+O1D/erVhBox8vXY/B3+VNqw9P+BpOYJMTTw56gipmhnzrnjy1EWjk7h1Xx5v5Tkw9+ZaEaeGinyZTKms9tWxRImZ9PGEtYQBD1RXcf01KmxOjhZUsPZQHptTCskoqsTpOk9jXwUFhTPOOavIGTRoEABHjhwhISGh4cQBhX8UWSXWc2MaWhON7B9JWPXAL73MQ5fIEzzmbBHVUS7wR9f/tcQDhX821jK5kWiIbZ+D3q/2xkWjkxlOp1W8DWoIbiKVKD/cJAPtGibPlrjwH2+Xm5SEPpC6XuJjNXpJLDmyRsSR2O7Vn2vElDi8JbS8DGZf6/vYPB7I2iXpITVeBSq1zI6uekEEIZ1JxKW6bPpQPH/uWCv+AmGtxGNn/y9y7G1GiWDz+ZW1z5O+DXbOkhsIkJuTab+IH0Pd2PYtn0FcT4kbrhmANx0E2786tol6+5cQ17Xh91xBQYFAk46Wkf4k5tT3+FCroH1s4CntR61WMaFHAgt2ZvlcP6ZzLCHmOuOHpJXifbPgPhEOKnKldTS4ibSK6v0hP1HE4vjeco7JOwBjPxDvr8B4OYd8Obp6fytEBL/+J/l/RZ5U+QXGi3HyyP/JPlVaMU2+/XcRG8qypAIntHl1i5YOItriLs9F7R/u66VcmFiLYfFj0GEi3L5G4tnLc+W9DGsh1UiLHpXqzEv+T4SdI2vkWtP+avGfWTpdzqfXzhEvot1zpMW34zVSbWMrE0+0knT46S7v509ZA0OfEmEkvBXEdIWs7bL8tlWwb4FM0kV3lL/J91NhwEPyntf1covvJZ+FuqRukL//5G+l7So/EeJ64EnoQ6YqnH//fJiU/Ao+v7EHWnUsVWMWElSwjcCsDVQGNqWi8XC0wXFEan2MX3P3SfuWL46ug6oCMAVLW1aH8RI5vmcelGdDq5HipVjX6+5PUlhh58v1Kbyz8jAutwiiFoOWd6/tSu+mIRh0F5mwqKCgUI9zHj++f/9+0tLS6N+/PwAzZszg448/pm3btsyYM
YPgYCXO9Z9EVrGV6MC/PutwUopSwS8IdEbMgFnH+TM8tkRLTPORNYqQo/Dn8bjAUdHwenu594yeyyGzgVe97y3k9LtfZg9tPlohhj0js4N+QeJZ07if3MjsnSeD+vheYAwQc9Dml0pJ+pB/y4BbZzyxd4K9wvv4Wo2EHV/LTPjmT+W5Fz3iXZ2jNUhrIiroeC1oq9sewg7WptUUJnk/T0Wu3Fgce9/cMktcV8SpIX2T/DQZJJVJGdtr/TRASvLdbokbVlBQ8InH4+Hhy1pxz6zt2I+b/b9rcHMCTac+9GwV6c/AFmH8fijfa3mExcDN/Zug19a5Ga05h5VmwJxpUoEY1kIEhrk3Q5+7RNjRm+H3V2qrBduPlypEjV7Ok/Y651WtH/z2H/nO+wVLip9/lHix7PhaxORBj4qYs3eu/B4QIxM1q1+CgiSJOR8/E6y+280uWFQa8XoJbyP+Li0uE6Po1a9ItWbdVKVlT0nba9/7JPHLUQl4ROiylYr3zRejpSI5/5C0MAUlyLl5y0wIbep9rq1Ba5QY+PSt8O1EqZqM6QLLn60WRELlelFwWLb/5UG45vNaIccQIH+fGq+eute5pOVSkWkMkmNJWYu74Ajurg/y5uURVBRmEliyhxx3IP9bX8yh/Fgah02jMMvOjmUp9GhcwrvXdiFSXSYin71C/vaWaPFh87gBFWz9HIqPyrXWFCJimFoL5khpB3TaoN1V4Bcq4o7+zPivbTpSyJvLD3ktK7M5ufmLzSx9YCBNw0/QEqegoHBRcM6FnEceeYSXX34ZgN27d/Pggw/y0EMPsXLlSh588EFmzpx5rg9J4TySXVJ1yrNzf4mSoyKeVBPupzp/EeQqlQxmjqw6P8+v8PdAbxFvmQO/+F7fbKikd9TFXi5l8H7BtbGp5rD66SzmMAiMlXjZzO0yMN47V8SbK16VG5/9C8QosulgiO8DK16AgQ+Jv8xv/ydGy1EdpAXKF7FdxVi0hjajJMYcZPbbHC5VQXvmSUVddCfZZtULMgi+9L/wzTjoc4/4D2Tt9P08cd2lRaKGxv0h8QRpITtnyyxt8qr63hCdJioijoLCSTiSX8HMdSl8Mq07P+3IYHdGCZEBRsZ1jSM5r5xyq4vgE6eXHyPcYuR/13Ri45FCZq49QpXDxZUdo7mqSyxxx++k6WBvgbYwWX6iOsiNemhzqYioS997RYD4dqIICd1vknPn/oXikeOyQ7Mh8PurtY/J3V/7/163S5x1m9Fyw16DSl1b2Ze5HewVOP2jUblc6DQXSSWE1gC974bgeKnMLM+VVjRrse9obGuJCCtfXSXCRUJvGP8Z/PqIXG9spd7vf902qEnf1N9fcBPI2Ay/PAQthkkV6LzbpO135fP1W5dARJGKAmg7GoIaie/O/DsBcN+0FMfuHzGkrZGqrHZXy/GsfvnYwzXX/0Rs4UbUC+4mqELEQ3+diZcH/peZoV14a10uNZ2FGUVVBFelwdyp8plodYV8DpY8UXt91fvDyNcgIBrm31UrHsZ2g0v/I8dWs0yjg/4PQc/bwBx68r/PCSgot/HW8kSf65xuDz/tyOCBYa3+0nMoKCicf865kHPkyBHatm0LwNy5cxk1ahQvvPAC27Zt44orrjjXh6NwHrE6XBRWOgg5F9HjhSm1BnNAmJ/6/HnkgJQCr10uA46/eMFW+Iei0UBCXyk5zzvovc5ggc7XSdzq8VhLpL2oqkhK2oMay2yxWiszihtmyOz0vNvq3LCoqs2HHbDgXhkgN79Ebla2fg7l+dIO9f31tS1Mq1+G4c/LzPjxlTnNL5EZ1JrWqagOENFWnr/ztdB2jNxUuV0iGmXtkFnTNa9JuT6IyNLlemmJGv6C7+dpdoncLNRt0dKZwHYCfyprsZTrL3/Ge3lke4ju0vDjFBTOIE6Xm/xyOx6PB4tRh7/xnA/X/jQZxVY2JBew9WgRI9pHMbJDNEWVDp77ZR/55Xau7Hh6bSMRAUZGd4phYIswXG4PQSY9Gl8+PUEJ0HK4VAjWZfs3uCd8hfrAz3Jjv/t7ESRqKke+rePXtet7mPiVnANMYfDLAyIcBMR6mySD+LKEt5aqlZoqnvbjpF3GaZNzqq0MNrwLtlK0xiAqne6LR8jxuKDDOPh4SG2r0O45Iroc/s27cgnkHK7zExEHxAz691fET+hknoDHn7tVKrmmrHtLfj+0DDQGeX+dNt8iTg1VRdLqG9lBWoH1/ngGPoy9soTKhMHoO1+D6sAv0splK6t9XKO+YAxA/ek4maSowVFJ0PJHuGXij0zo0JwgZz4qnQmMFvSL7pfrpEYvIuC3E71fi71chMLZ13k/16BH5fpcN2Lc5YDVL+EObkxBs7G43BDur0ejqZ088Dov+GnxN/i2JrC73KQX+qhwquZAdhlOtxutMjGhoHBRc85HBnq9nspKGVT/9ttvTJ06FYCQkBBKSy+yslOFv0ROdfR46NkWcpxV1X3dA44tCjOpSCw8j0JOVEf5N2WNlNQqKPwZQprA5O9EzNg5S8SPliPkJmXpv8UP53gi20uJ/7BnZUD8ydDaQbo5XHwfdGbvWeeSNIkfr1lWfFSes4bB/xavnBoRp2ab7V/JLOr6GZC2QW6Mut8oM5fL/iOD/haXQZepcHgZjP0IMrbIoLdmIB0QK8eEyru65/Bv0PNWWPsGbPtSEkc2fiDpIqZQ6Hm7tEf9eJv368/aJSXuu3/w+ZZ6mg5BFdVJbhgSF4vw0/0meU8Dok/pz6Kg8FfILqni201pfLXxKBU2J4NahvPw8FY0CTWjO9sJj2eApmGSSGV3uVmwM9NrnVmvwWT4c0JGkOkkYwVzGIx6W1o/N8yAygJc8X3J7/045eoEwlqMxVKVjrr7TVJ5d3St+OrUxV4ubTuj3oF51WbqvzwkaUv75osvi8ctInifO+H3/0mrjyUKhk6XVpnvp9UmXflHSkWGRo+qPAs/y1/3PjlXeFCh2jXH2+/FUSki98SvYcvnkPSbnCM7jJeW1B/v8N5J2iYY8pSMwxrCEi0tScFNxF8otqsYVu+Z530dOvgL3L5WzKeDEmq9D48nsp28/4mLIbI9nqaDUP1wE0ZrCUYAvRn3iFdQDX4S1YpnZJKg87XSHrxzlreIUwf/Da9gSegDa1+XBUEJYqBdWQihzcQ0+3hBKry1+O/UFXEi2koKV10Rpw7qVS+yzdqGJ5flMbVPIyb2SCAq0EhWSRWz/kjl641HqbS7jp0XmoaZ0Wq8zwt+Og2toi1sSSny+Rw9m4QoIo6Cwt+Acy7k9O/fnwcffJB+/fqxadMmvvvuOwASExOJi4s714ejcB7JLD5HQk5xGuCRAVU14SYVq1LdeDye82O8bQ6XG9QjvytCjsLp43KI2bG1SGYu+9wrQkPmNhFQCg77bmlqMwaqSuCqD8BtF4PjulTkweInpJKmLls+g8tfhVnjxbPGFApdpki7k1onJeGl3jdsgMyMp2+GrjfA5S9VD5CrTY3bXQXtxojPTspaiOkGhYe926BAZsHn3QbTfpbjq0vNDcahpfI8na+TVgdLjLRG
aHLrx5gXH5XqvLAW4tVQF40Ohj4tAtnod6CqWKqOzOHiiaGgcJbJKbVy65db2J1RO7G1dF8OqxPzWHBPf1pFWc7j0Z0aMUF+tIm2sD+rrN66WwY0JcJyFpMqLVHQ605odzV2h4PvdxWT4IkkvKIIfaA/noD2uFPWot46UxKDarxV6pK5TSr5ataVpEuiUesr4bLnJCjLViE/jfuJSXpVkZx3Vhxn2FueA3Nvkdhpp82ruuJCR+W0Qf7B+iuKUyFlnZxrh06Xc+Ovj0qrrM+UMbe0trcdC/t+rL964MMi2kz+ToSi/QvE66b8OINij0cmIda9KefpGqGtLi1HiK+RfxT0vhOXw4rm48HeFTz2CtQL7oYbfoHbVksLWU0FaEFS/X3WvB+FyRJXX/d9mHebpFqlrPF9zQ2Irb/PgBjfn7tj+z1KpL+Wggo7b/x2iFWJebw9qQt3fL2VvZn1zwsL7+1Py0jv80KQSc+jw1sx4cP6sen+Bi3D2vpI2/KFxyOfYbdL/IqUCnIFhQuKc35Feffdd9Fqtfzwww+8//77xMZKROSiRYsYMWLEuT4chfNIdqnM0IT4n20hp3rWpk58ZLifCqsLCqynHoN6xonq4J2soKBwKpSkSyvVrm/hi1HwTleYdY2U90e0hcydsv6q92Q2EGSmetDjMOQJ2P6FDM7XvuF7/xV5YrpYl8Jkadca/zl0v0X2nbkd5t0KC+8T4+7IdmIiejyVhbD+bRm4vt8X3u8t1TsL7xXjx73zxO+g8LDMbvvCViYD5fA6Pf0qlQzAa6gqkhaGlDWSntVmlAyYx8yojV23RMNlL8hxjngZd49bpSoIcDceiOvmFajCWsi2erP4BAVEKyKOwjnjYHaZl4hTg83p5tUlByizOnw86sIi3GLg46ndGdC89jxi0Kq5c1BTru/dyNug+GygUoElCpclmk4xJvpX/kabxZPw+6A7mtmTUOORqrySdK+Way9qBN8a3E6pyFlwj5jGx3aRFL/VL8OHA2Scsf4t3/tyWsVY13gO/ADPJBpd/fen2SVyTk3fBF+OgpnDYcc30G2aCCg+UYPOIJ5DAx6WiQCQKs+xH0hq1Mb3oChZfHTWvl5fxAER1dVqEZAqcuDqj6XCFMT3bdDjUj1Zkgo6I87ktai2fNpgG5Zn7ZuScLX9a/hyjHi71VwzfRHWon57nbVYJjGcdghtUf8xJWnyOK9lJ/jcAYQ0Jb209nueVWxlf1apl4hTg83p5rWliZTb6p8X2kQH8PbkLgTXSYZtFu7P7Nt6Exd0CobK5bmw5VP45BJ4sx18fbVMPl5spt0KCn9jzvnoNCEhgZ9//rne8jfeaOCmQuFvS2axFYtRi+FsD+qKUmTgUOemL9wkGmZ6mYewMxMQcPpEd5LS39LMMxI1qfAPoDQTMraKwfGu72qXux2Qu1tmRTtOgEZ9xGTxkqdFgFGpYdWLEt/d+VppZ6opDz8el0MG05HtvVulDiyUWbl2V4mZZU35eVURbJwhFTGj3pZ0mONpf7V4HNTgsMqge/NncKjaz8IUWn+QXJeCJJnxrvEDajFcZoXrotFB5ynwYX8Y+Jh44XSaDCNelvJ5nRE2fQxLn8Q25D8cbPMQLXrdh1rlQWMMQGtWUhMVzi+/7PYdtQ2w6mAe5VYnFqNvX4wLibhgE+9e25WCCjtVDhcBRh3hFgPGcxh57Oe20j71a1R1z3W5+0WMueT/JCa6563eKX417JwNg5+QSOvjGfJvWP+uVI7UoDV4m/ceT14idJn251/M+aAiT1pM//hAxCj/SOh5C3w3pfb8X1koAnxcD6lW+uVB73007i8TCT/cDIMek/d62H8lQaroqCRg1bxv+Qeh8UCp0vT1N2k7BvxC4Otx0pYU3Bi63QADHxGxJroDvNsDbl0Jv7+KetATqPfMbvDlqQqTRICrMbJOyoPed8LmT2pb4+rS/WaJYz+eoiNQcAj6/UtErbqtWfmJMpFgDKz1Cco7AGHNvZfVoaDnI8z4o7aarVN8IIv2ZDf4OlYdzKXM6qznl2Mx6hjZIZrujYIprrSj1agJMekJO5WKuKpiaaHb/lXtsqwdMnk04SuZKDkf1ewKCgpenJcaz6SkJKZPn87kyZPJzRXVfdGiRezdu/d8HI7CeSKrpIow/7NYYl1D8VExJaxDuEkuQGml59EnJ7KD/Htkzfk7BoWLi4ytYjRcV8QBGWCuelkGt1s+gzk3wDfjYfa1MvCylcvAuKqo1iz4RDOCeQfhildkQF3D5k/FXHjl8749BNI3yaxoi+G1y1Qq8ZKoiRavQW+CiNa1Ig7I7F9QQsPHFNZCZjcBWl4us7u76gzS/SPFjydxiQhO698SL541r8HWmRJVPmca7PqWyu53kpZwFdN/TaJYG44htJEi4ihcEAT5NSzSmAya89MK/CcJNOlpGu5Pu5hA4kNMZ1XEcbrcpBdW8uP2dF5bepDl+3NwluWiaqhKZu0b0iqVvln8wgx1WlMCYkR0CG8j63R10rFCm4kXTF0RB6orL05wTo3pLP4vJzP9vZDQ6ETAufpjaRHqOhXWvtnA+X+zjLNMIbXLmg6W1qvCZBHNPG6p5PzpHhHIlj3lLX4FxMGh3+DyV6DJwNrlKhW0HgVDpst5vcZbpihFouHnTIMfboRDy6ViqKoYTCGoStMlDr4hItp4C0YeN6x7G8Z+KNWbNRgCYMRLknDlq1IovLWIUps+hKve9x5vGoMosaspmfhjbWUowOpXcU/6VnyBatAacQx5mkWVbbzaEq0ON5YTmJ2b9BpU+D4vaNQqYoL8aBsTSMtIy6mJOCCvs66IU5dFj8pn+U+QXWLl98Q8Xlt6kO82p5JaUIHd5Tr5AxUUFHxyzityVq9ezeWXX06/fv34/fffef7554mIiGDnzp18+umn/PCDbwNKhb8fmUVVhJzMvPBMUJgi1S91MOtUmHWQXn4ehRy/IJlRSvldYo0VFE6E2w2HV0B8z/rrzGG1SSG+KM2ASd9KiorTJmJM3/tk8Hs8OpMkvxSnyuBVpZbZSZ1JTCZTNzT8PIlLZEazxy1gDJD2pB3fiJdBXa+agiRvA00Qw+L+D4qJsccFR9fX3vQYgyQSOLab9Ol7XKA1w22/Q0W+3Gzo/WX283B1tLi9Qm4ADv4qz5W+GefUX6h0qZh/2EW8zUznuMCz39qpoHAaXNUllg9/913ZMaVXI0KVz2s9XG4Pu9JLuO6TP6hyyE2hXqNmyxQjAcebz9Zgq24P+eNDSdEbM6PWD6vgkCy/7Fm50b/6Yzn3aQ1yLkzxMfmy/SupyFj/trS4WktENHc75dwU3gYVHmkTvVharPyjYP07kLxSqpNiOnvHsB9PynqY9ivYyyTF6eCvMgFQw565IgZtfK/+Y4ObSGXpoUUi9F/+qvjWqDUyQaDRi3CTuAQC48Vg2lklE2E1pv7JKyROfvvXENcd1VdXiQn+zm/ri08qlXgpfXed9/Ijq6Xd6pKnIKSZeMSoVHgqi1Atfrz+cZtCKYnuh+fqrpSXlRIaGYf12sVUFOeBx02u28LrG0p
JLS7lyYEzGRynori0nEICmLfTSY9eX9AmwIbabceqDybDGcD0L3d5PcXG5AKmj2xDfrmNrGIr29OKvdZP6d2IsDN9Xsg9wcR6jSB5mpXkaYWVTPn0D44W1KZI6jVqZt7Yg56NQy4KI3cFhQuNc/6tefzxx3nuuedYtmwZen3tiWfo0KFs3FjflEvh70t6cdXZH5TaSqGqECyR9VZFmFTntyIHpCpHqchROBXUarkhqDtzXINKfeIyZ49bvGzWvCaPb3WFeMNc8T/ZZw1BCXD1R/Dzv8QsuCJXhJg/PpQZV7XmmKeMT4wB0o7lqTaNPLJGxMq6M9pxPaDZEO+ZW60Bul4vVTUGi5TPj3pLWsPCWsgxzb1FnnvxEzLQ3j0bljwJBxbId3zhveKx4KiT1OW01QpGFXm4Dy9nYaqe0KAg1hzK447Bzc9+a6eCwmkQE2TkwWH1KzvaRFu4vk8jdBeRWe65IqfUyk1fbD4m4gA43W6cGuOJH6iprn46vBxWviAi8fw7YeULeIY/L/43Gr1U8x1ZLQJBWbYYKh+P2wVx3UUgV2ulSmPSLBELxn0qQrXWIOfqiwVPdUhE3gFpR8s7eOLzv7m6PfaPDyWmPLariO/GIFl/8BcRubpOlfeohpgukoIV3VmqLTO3wd4f5e+TsVW8a/b8INeXqz6QViqtXip4xn8q8eYAhkBp9Wo6GNa8LqLbpo/E061ulYxfsBhdW0t9V0jl7JGqoYo8EbFS1oHBjGvKfG8fuPBWZI2dy0FrCJd+lUP/r0sY/20aOapw7l/tov9XRVz9TSprk4tJLaxkfY6Wfe4Ekg2tGDsrnU82ZHL7/HQGfpnH5XMqqDTF8ulG79CArvEWNt7RjCZJX/GufgYzW29m+Y0J9EiQcUDbaAvX9Uqol1r1lzEEnHi9+vTaO8utTp75ea+XiAOSbHfLF1vIKfORsKmgoHBSznlFzu7du5k1a1a95REREeTn55/rw1E4j2QVW+nWKOTkG/4ViqqrFPzrCzlhfmoyzmdFDojh8YGFkqwVFH9+j0XhwqfDuGojz2jxufEPh4JkSN8CTYdA0or6j9Hoxe/GYYU+90hZdEgTucHQ+cHk2XKDUTOrueNbad+KaAOz6lSK7flBZmU7T4HNH/s+vrgeMHuy3NRc+h+I7y03QDctEX8ftUa8cL4YDaPflsG82ymizfZvvGe6d3wDHSaK786Pt4sYpPUTw+ZZE2S2tgadScSe4CbS1w9Sxl7m7SugP7yYoVdMJKXEw60DmhJ9KoaPCgrnkEA/PdP6NuLStpHM25pOUaWdkR1jaBsTQFTASYSJfyjpRZUUV3qbvbo9kFTlT4g5TKr2jiespVQUTPhSzpGWKLDbYNgzVAU2x6DTotKZxG9s1oTaNKYjq2HqAhEZaqoMDRbxDfl6vLfP16YP4cq3RIxuNgS3PgCPKYyLRjrWaCGhTgXogV/EM2f71763j+kiLb015sIb3oXhz0HHiXLN8HhEcO88BSZ+Je+fOVzEF1upPNZeUbu/dW9Km1PeQanmMYXA+Jmw8rnaFqetM0XI6X2XxIfPvUne8/IcWX9oqWx76X/l76Qz4dHoUK18Hvo/UHsNOp7ml8r1KGsXRHdC9cUo3C2vpOLG1ZQW5OLRGjEFRZJW5cdjc3eTXy6eOnszS0nMKePuIc0pqnSw9lA+Bq2awa0iKKywcSS/grlb0vjp7n6sOZTP3swSujcOYVDLcGKD/HhlXCf2ZJawaHcWLSIt3NykEN0XgyTNCwhiLkG6l/jkmrkc1PaiUZg/kWfjvBDWQq6rjsr66xL61BpWnyKFFTaW7/fRlgZUOVwczC4jLtjkc72CgkLDnPOpgaCgILKy6vdWbt++/ViClcLfnzKrgzKb8+xHjxelyM1jncSqGsJNKlJLz2NqFdQmLtT4ligonIigRmCvklL/ilxIXCoGvk0GiQHn8aXOKjWMfF3ala6aId4CnSbJwHr1y1L27rKL4LH6ZVj6lHxX+j8gs6DHs+s7SQUJ9+E7MOTf4htR08rw23/kZscSLQPl76dC7gFYOl0G2VtmwuUvQ1RHKM/z3a6w+zsRY4c9KxU7+QdF1Kkr4oAMNhc9JjOoHrfMJo55T0wr6+A2R6BSa2keaVFEHIULlkA/PW2jA5h+ZVtem9CZoa0jFBHnBJRafdyIA/9ZXUjlVZ9LpU1djEFizPv9VPn5dhJ8PhIqcyk7spkvdlVgdaug6zSpAKwbqd39JhEyLn+ltrqm771iJn+8WbvHI9WNdokpVzmrcHouGhkHm8qEpyBZKiNBJp3ajvHtBTTsvyK2HJ8QtWS6tJH3vV9+93ikDe3byVI1U1Uk14kf7/AWcUAmFxY9An3vkd8rC0Wo73Fc7PiGGdDqckluHPcZGIO9K5+ydkil1XdT4NtJeEzhUtW55TPvv2MNgXHQ6w6Z1DCFHvPk0SX+TFX6bib/amPA59lc8/VhEnPKuaxtJB9d3433ruvKO5O6EBloxOHyEOSnY0L3OAL9dDz4/Q6e/HEPNoeLO4c0Z1dGCeO6xfLmpC5M6d2I+BATarWKqEAjl7aJ5LUJnbmlsx+6uTfUF1McVQQuvImOQdazIuIUVtjIdAXiHP+Fd+UUSGXT6HfAdHqecnaXG/cJhtuFFT7MpRUUFE7KOa/ImTRpEo899hhz5sxBpVLhdrtZt24dDz/8MFOn+kgHUPhbklUis/9n3ey4OEWqcdT1B0/hfip+K3Pj9nhQny8DSWOAtJ4cXQudJ5+fY1C4eNAapHe9bjJI9i4pY7/2Bxj7kZTBZ20HUzg07gdbP5eZVIBr50h61NLp8vul/5F40Zr1AC6bfC49PkZdLYaJv07/B0WcSd0opebNhsCBX+vP1B5eDgd+hm06GPmatHEdWS3rEhfL6xn9Niy4t+HXvO8naVlY8SxM+kYqe3xRmgGoYOhTIg65bFKWX4fyzjfjUuuIOlXDRwUFhQueJqFmn8v3ZpbzzqEEHrptLerDS1Dl7kMV1kKE6OX/FR+wGuwVuHUmFprGUVJsQ5W+GVTO+tUa0Z1g9nWS2nPt9yJAtxgOq17yfXAet7TmNO6HuzgNApv43u4CJMeqJiGyHWzZCNd+J+f7xCUSGV6cJudwvyDocI20ju34xveO9v0k145GfeT9spaJ55lfEOjMoFY17PFWnuvdTpyxVSYajufAr3LtS98E036Ras5d39bfTqPnQBEEj/mesLL9aEvT4MbFqJJXQEkGNOorlSgL7pUqofZXwy8PHXt45N5PGdvmv7y9rork/AraxwayaE8WH/6eTICflq9u6sUHq5NYui8Hl9tDqFnPrQOacsuAJry9/DDf/JHKpW0i+X5LGma9hoEtwxtMofOU50lUuS/KsvGU50JInO/1fwKny83B7DIem7eLPRmlXNYqiKevW01UxjI0RUmomgyUapw/UT3ub9ASGWAgp9Tmc3372IvEN0pB4QLjnAs5L7zwAnfffTfx8fG4XC7atm2Ly+Xi2muvZfr06ef6cBTOExlFMqN+xg3ajqfwiM+2KpCKHLsb8is9RJjPYx
JIRDulIkfh1KjI9R1/6nLALw9IeXloU2lxsldItc6Ah6WUXaMFU5gIKyCGm8GNpXKmLlpjbSoISAVP26ukcicwDn59RKpiAuNgyo/w60MiBrm8WxsAGQjrjJC2CazF8vx1BaJ9P0k5/vEzscfvo6ZM3n2SVkiXTQw2I9rCile8VpX2eZRSUyOlfFtB4W9GmL+esV1i+HG7t8ir16i5vGM87d7eQIe4rnw8+nKCf7oecnwbuVZUVHB5q2ZYPOVo570GPW/3sVX1WGH/QhHA43tJu9HxlSh1sZeCRo/HWorb7YaLpLkq0KgCl1G802ZfJ+byTitUFIi40mE87PpeBJCtnze8o6pC8aXZMEMMis3hkLhIqmIaD4SYTg0/FuTaolLXvse+DKxtpdJqbK+Qa+QVr0LGJvFqq0FrpHzcN/yS5MJgcBLm34EOcf1IsHjwD8tEk3dIzJxrkrR63iZx9VVFdZ6nHHMd3SWn1Mq6wwUAPDa8NY/P2+WVOFVQYeelxQd48oo2dGsUTEG5DaNOTVaJlbtnbeebW3rRr3lYw6/7BHicJ15/uqQXVTH+gw3HvKaWHixmWWIxPRv35o2JdxDzF6pYIwOMPDWyLfd8u73eusGtwokMUCZXFBT+DOdcyNHr9Xz88cc89dRT7Nmzh/Lycrp06UKLFi3O9aEonEfSi6vQqFUEn83UKo9bZnmaDvG5OsIk5bRpZW4izOfRgDCqg5gAlmaedgqAwj+M/MMND+4Kk8XXJnMnrH1NUlh+fwWSV9Vu03gAjP8M5t4sM40FydKGVJPgAuJHMKB6BnL4C/Id+v56ed6EPuIdkLFVZgr3zRfRp6Fjiu8J276Q/2dsk0F7SFPvyNn0zfIdrbusLi2Hi/EliOGz1ljr51MXnZ+kdw16VFq1Bj2GO2sXDpUeV7NLKfQEEh4aUf9xCgoKFzWBJj1PXtGW5hEWPl6TTHGlg/gQP14d35HZm1KxOd1sSSliX7Y//SryfO9EpcIcFo/6p2nQdrS0dMZ2Pe6J4qW9NTBOzn8et6T4OW1y7s3d73vfjfpJklNMF/wMp2cSe16pKkHlqJTz6a2rxCdtwzu15/ugRtIeW5Ih71XGNt/7ie8lrU1jP4SCw3LO15ukJW39O+LZpjf7FvQ1+uqkwmoRxxLtPdFQQ9PB4nOUtFKSxkozxBTZ4xHzYks0zkYDeW69k0vbhWPQqXnx1wPsyypFrYLpQxMY0/UeAtJXocWFKrojJC2vl9JV0uxKViVLG1CAUYvDJcflb9ASaNJ5iTh1+XhNMo8Ob8XO9BK2pxYfW/7Cr/v56uaehJh9CBnmsIZ9arRGNJYzdz1zutzM3pzqZRgO8vb9caSQt5cf4j+j22HU/TkRUqVSMbBlOJ9M684Lv+wnOb8Ci0HLtL6NmNqnse/Xr6CgcFLO291rQkICV1xxBRMmTFBEnH8gGUVVhJr1qNVnsRKmLEsMXv2jfa4ON8lzp5Wdb5+cdvLviWKdFRQAOMlnVa2DpU+Kt8Omj71FHJCy9nVvwbSfJVrVWSWtTVe8Wpss5agUUaXnHVKF88eHtQP31A0yYK8RHLd+Lm1WPloXaTxAbnZs1QPbDtfA1i9g2DPeCVuJi6HNlTJjezxBCZJkklk9i7dztgz+fdH/QTFMnjURFt5HvtPIhpipTN3fncPuaBrHxmA2nPO5CwUFhZNgd7optzlwn8hE4ySEWwzcMbApi+4fwO+PDmHuHX1pEWFhc0ptNcVrG0spGPi8z8d7ut+CetcsOSeCnPv0/tJCVcMlT4l32KDjYqiProMRL/lOpGoxHPyj8JTlUqE7y+EOZxiP2yUiSqN+Eu299nVv0b74qFRnhjQWj7Tj/VSg9jpQVSTv6eqX5f068ItU81DtmdP/wfqPBaky3T2n9vfBj0sFaF2iO0s15q8Py3qQqp0f74BlT0lF6KHf2FOkZfbWbCIDjTw1fw/7smQCw+2BZ5Zn03dmDh+5RmHtcC38/IBc++piiSI7fiQbkuUz9cCwlszelAZAbJAfh3N9CEzV5JXZCPDTcVnbSJYfqDX93ZtZSpVdPv92p3dVly4wGseQp3zur6r/YzhNDVTy/Akq7C7+SC5scP3mlELKbb69qE6VAD8dl7aJ5Lvb+/D7o0NY8sBA7r+0JRGK/5eCwp/mnI9qXS4Xn3/+OcuXLyc3N7e6zLSWFSt8pK4o/O1IL6o8+/44RSnyb4CPqFDAqFURoIf0svOcXOUXDAGxcHSDlGgrKDREWAvvtJS6hDQV3wKAhF4y6PbF0XXSqrS0TitrXA8Y9zF8d73MfK54Dm5dKSkix/PLwzD6Xdj/M+ydCzu/g+vnw9o3Zd9+wdD5OohsK7OwIAaJziqI7gCHlkjCy4YZEjPrHwmlWTBtoaSUHFwsNwQdrpGbqJK02ufev1BifUe9DX98AIVJENocT9/7UBmDYM1ruBoPIrf3kzz7h5sruzrZkVqEQXsRRf4qKPxDKKmyk5Jfycx1R8gutTK4ZQQjO0YTF+yH6k/41mk0aqIDa9s/yq1OogONHKq+wd6WWsL7kY24bdw8IjY+L5UaAbE4+j2ETqeHH28DVBDdUUSZqiJoOUJ8cfbOFx398G9SKXHNF7BxBmTtlGt3dBdJ51v+jPi0mMPFlLftGMg7jCu6C1Uqfy4mJxCjVg0Z+8AvRM63vrCWQHE6JPSFGxdJjHvqhjrXgXYw/w7ofjOsexsu/T85x++dJ2bD4z6Dskx5r8fMkOfJT5QEwv4PyPI/3heftH7/kooc/yipwNT7i3F/o74iKNnKwD8ST0I/PE47ar0Z3C7sMT2p6nort36ShJ9OQ5nVQUpB/SoXm9PN2ysOM6xdP6Im/YRpywy0e38AVDjbX0NWu1u5c14O7WICeGhYSxwuN2sPSyJaUaWdiBN4rxm0ahJCTNwzaxuuOoJlqFlPRnElD81JJCrAyI39mtA4zESgn1485DpOxBHUGN3q56HgEIQ0pWrAk3ji+2A2Wxp8vtNFr1URHWRke5rv9ZEBxjN2HQ1XPOoUFM4Y51zIuf/++/n8888ZOXIk7du3/1MX6+OZMWMGr776KtnZ2XTq1Il33nmHnj17nvRxs2fPZvLkyYwZM4b58+f/5eNQOHXSi6rOjT+OweJtlHccESY1aedbyAHx9Di67nwfhcKFjjkChr8oM4910ehE3KhJmnL6NhQ8hqNKqmJq/GrSN0Pyaok0rzGw1Blh6HQpeXc5RFxxVomp5OxrYeQb0PoKSfUoSZcWwUv/A7n7pJ1qzf9qn6/JIGnjiu9ZPWvtEf8Jjwsq80XcqSrE0+9BVL3vlu/u/p/g66th9Dt44nqI+ShIqXtoM+h1l5g5Jy5Gtf1rMge+ytGRy9ia5eCzOYUYtWoGt3MyuFUEgX4XUTuDgsI/gHKrg+82p/PCr7XtSBuTC3l/dRJz7+xD84i/fpPqb9Ry5+Dm/H6oNn78k835/JJo5OEBbzJsdACrDxcTZfCjx8/D5BzXZjRo/WDAo1B0BH66G2K7Qd/7pAW1+00iHLhd0P0WOY9WFgAeyDskKYEqlQjiR
Skw/14Y/RbZpTbc5gtgrHEa6LBLu1h0J3mNnSaJSTFIC9PeeXKtyT8gIkOTwdDyctnOXi4pVmtfg/YTRMhx2cQw2V4hSYQarSRhNeoPCa3FZH/MeyLsmMLkmqQzSkpSwWFY9KhUjo77FNpdJdexfQtEUKu5lpXnUHnZK5T7xVI8fgVBJgPvbCyiT64Bh9tNsEnHkbyGPdlsTjelVU6u/Pgwt/a9kZtuuJ99mWVszPYQlK7n3ktaoALCLHpCzAbeHhVLjEVLSrGDyGAT/gatz8qV0Z1i+GxtMknHPffkngm8viyRjdXVMPN3ZDJ9ZBsm9UzA36BF5x8Kba/AFtsNnHbcGh1+Qb4nJ/8Kfjottw5oyq+7s32uv3tI8wZNmRUUFM4f51zImT17Nt9//z1XXHHFGdnfd999x4MPPsgHH3xAr169ePPNNxk+fDgHDx4kIqLh/tGUlBQefvhhBgwYcEaOQ+H0yCiuommLM1cW6pPCJOmZPgGhfipSSy+AwVVEW5nps5aICa2Cgi/0Jug4QQbWa9+Q1JXY7tDnLhn41rReaU8y46Xzq59KtXM2XPO53LS0vUrapjJ3Qv/7Jab88G/yuPbjYOJXUJEPqKVsvapIbm7WvA5dp8gstqNKRJu2V0lKTECsCDw7Z8uAvvkw6HGztHpl74J9P1Ew6guc4e0Jy92PtjAZEnpTZQjFeeWH+KeuRLXrW3A5JWo2IAq+GAUVebibXcq3eyt4Z52YIodbDLxwdQe+3niU6SPbElVnll5BQeH8k1du58VF9T1lSqoc/GfBPmZc1/WMCLBtoi08OKwFb/x26NgpL6fUSkRkNFd9tZfk/AqeuiSKHgl9xNx2zw+SkDfwkdprccZWOR+OeFHaVX+6GzQG8RlrfaVUoATEwrbP4ed7pWKk+03SVtT7djxuJxqNDo+v1qsLGY1exiYuO1z3g7RALfyXrGs9EibPhp8fFO+gLZ/Kz8jXxdx474+ACm5ZKcbGM4dDVTE07i9tWKtfgcPLZF8tR0DaZtgzBwz+0hockCDn+aPrpHLHXgkdJsj177spDXqqeaI6sTo/gFWHU5nQLY5liUV8vSWLLenlvDu5Cw/P2UmQueFJRINWTUmVA7vLzYw16ThVTdmTWXnM0BjgrsHN8HeX0FS1j9HbX4bCJLqHtqDI/zEW3tKTMZ/updRaK+Z0bxTMTf2bcM0H672ea1jbCJqGm3l35WGv5c//up9L20TiX6cV2BDoO7TjTNIs3MxTV7bh+V/2H4sKV6ngvqEtaBsdcNafX0FB4fRReTy+MmbPHjExMaxatYqWLVuekf316tWLHj168O677wLgdruJj4/n3nvv5fHHH/f5GJfLxcCBA7nppptYs2YNxcXFp1yRU1paSmBgICUlJQQEKCe2P4PV4aL1U4u5bWBThrQ6i+ajc6bJIKTV5Q1uMmufne05LtZed+ZKVP8UJRkw/3aYMlfMZBVOyj/yu+jxQFm2CCdqHWh1UqXjqJJUqMJkaYfq/wBk7xbx5XhqIkRXv1x/3T2bxTDghxtkNnni1zJoPt5sMawFTJwFX44WLyoQj4iqYqmycTqgRbUp8u45clO0/L/1B98GC0z4Er6dBAYLORN+oaLKhsWgJtDsRzl+7CnRkZhTzsg4O9GHZ4s4dOR3qSKqpnLqErY6m3Iot4LmEf4E+unQqCHQT098iJJSdbb5R34XFf4S321O47G5uxpcv/qRwTRqIFb8dCmzOigot7MjrRgPHlpEWPB4PIx6V6pgYwKNrL4hGt2348Ukt3F/MS+O6SpeY7l7RbT4fmqt51cNLS6DbtMk1el42oyCNmMgqj3O0kwqo/sSYD67ovKZ/C6WFWRicRZCVYlcE8pzvTcwh8PYj6RSc/a1tcvDW0HHSSJy/XSX17kakMqnG36V97I4BWK6iZhvDpMJidT1cr3ofrNc51pcKkb/B38R0QgP7JhV/4Aj2uC69BneOxTCuxvy+O72Pryy+ADrkwrQqFXMuqUXOo0ag07NvbO2k5xfvzLnul4JlFmdLNgpCWhNw828NqoJAe5i9M5ynDp/LMHh6Hd/S8CaZ+o93jnseY42nczWjEoKy+00CTeTWljJ1pRCHh3RmuT8CkoqHbSJtrBkbw4zVh7G6cMb6rXxHRnX/fRjvv8qFTYn+eU2dmeU4HJ76BQXRKi/XqnGUVC4QDnnFTkPPfQQb731Fu++++5fbquy2+1s3bqVJ5544tgytVrNpZdeyoYNDRvHPvPMM0RERHDzzTezZs2av3QMCqdPRrGYCZ6on/gvYyuVioGTpECFm1RkVXhwuj1oz6bx8skIiAFjkJQdK0KOgi9cdkjfKolTpRmyzC8YLq+OSz26VjwZLvk/8Z8Z/6mIHoeW1u6j6RDofQf8cHP9/Ud3hsIUqCqQ9qiuU2Hbl74TM/IPyUyprs5NSdom+X3HN/L7zm8kpSRrJ5Sm+55BtZXBzm+h5+3Y208gdNN7RO6oTrlSqQhqO5Yug/5LSaWBlRlOhoX3JnzpPVCTPGMIoHjoS7y7XUW2LZ2YICPD2kYQH3JmbgAVFBTODjVpPw1x/BRjXpmV/HI7FTYnYf6G07q5tBh1WIw6Qv31FFc6KKq0e/mtRwYYcCatQldzXs07AH3ukfPnqDfF02Xnt/VFHJBqkiX/9v3E+xfCoMfAYUdrDsNgzQVzo1M65gsBDYAhUPzQjhdxQM7D6ZskjdAcVl2lCZTlSIpU7t76Ig6A2yltwI37SavuiObij1aYLP5rf3woH4BtX0gb1a7vpRJnnx12fQdXfyxBFvvm16ZZxXWHwU+gmXcbo8f8yNvrPWQVVzG5ZwIHs8t4cmQbVh7MZVibSFwuDx9c342Hvt/J7owSANQqGNUphh6NQ3jw+x2AVOe8e2UUrbZNR3Nw4bEPpafV5ai63uAzUUq76jkCEobz+tJUDDo12SVWwi0GPpjSjcahZpqG+wNwJL+Ct5Yf8npsVICROwY3IzbID4NWTUp+xZ8WUQrKbRRU2CmpchBi0hPmryfwFFJizQYtZoP2jImo55riSjv55XaKK+0E+sl3XknEUvg7c06EnKuvvtrr9xUrVrBo0SLatWuHTud9gpo3b94p7zc/Px+Xy0VkpHfJYWRkJAcOHPD5mLVr1/Lpp5+yY8eOU3oOm82GzVbrN1FaWnqCrRVOhbRCufCdVSGnIEn+tZxYyIkwqXF5IKvCQ7zlPAo5KpWYuKb+cf6O4QLnH/9dLE6VChiXvXZZVRHMu0XMg1Vqabcyh4ungDlcElN63Cw+BlqjRI0vfqx+fKtKBf3ulxaB4qOyLLZ7reeOL5KWSxtWYTJEdQQ80GWqRNJq9HDgZ5h3qxzL1s/lMVqjzHZrjZJEVZoByauouO4XjGteRLOvzvnf40G9dx5mj4c2fV5kUQEkR3VHdd0yzM4iNHg4VGHk5bXF/J6Uzwtj27M9rUhMIhXOKv/476LCX6Znk4YTnDrGBRLgVzs8Tcor57YvtxzzF1GpYGznWB6/vPUpJ97klFr574K9/LpHPEA+ntqdqAAj2aVW+icY8Du0
sHbjmkmgmZdL1PiEr+GLK33v2BwmZu5RHeXcmbXTe336ZsjeC0FxaFqcGUuBupzN76LBlg9GCxz8teGN0jZDkwFw4xLxyTEGVZvbWyFlbcOPS90Ave+E3/8Hc2+Bq96XVquwlmKUX1Uo1625t0DPW0Hrh2vkm3jcTtSOStR975NJibJsaSXO3iPbVhURUnGIJ0Z0oajSTpvoAKZf2ZZD2WUkhJi4YeZmymxOQsx67hzUlGfHtKfK4aTS7mJ1Yh4Pzdl5rK3o3r4RtNr2LJqDP3sduurgIjm2PnfJ8dfFUUWYuow5d/ahsNyOXqsm1Kyv9zkN9NPSLiaAvZny94oN8uOFse155uf9JOXJ9Vmlgqs6x/DE5W1OK9kprbCSu77ZdkykAhjSKpwXr+7wt24zziqu4pEfdh0zoAbo2iiItyd1IS5YqcxV+HtyToScwEBvz4+xY8eei6etR1lZGddffz0ff/wxYWGn5s/y4osv8t///vcsH9k/i7SiKtQqzq5KXpAkPeym0BNuFlETQV7qJt5ynvvXI1rLzJPLKSaACl78o7+LbpdEa9cVceqy5VPxFdD7SwVOUIKkWB1ZDYlLarfzC4arP5LP2b6fZH8RbWDAQyK8GCy1vhDOKtlfVZHv59RXbzt5NuTsleQrjVbMi6uKoc/dENwYMrbIdn3uFh+dw8tFSBr8uMSWb/wAj8flLeLUQb1/PuH9HqdDbAgTPxahc3y3OML89XywWqp8OsYFYtRpuLlfUwIUY+Ozzj/6u6hwRoiwGJjSuxFfbzzqtVyvUfPcVe2PjQ+yS6xM+eQPskqsx7bxeGDe9gxC/fU8Mrw1+pOk6VQ5nLz1W+IxEQdg+f5snr6yLfd8u41yhwq3zsKxvcR2E/HCXgF5B8XTy+AvaX91aTxAzrONB0graptRMORJSf3L3i3bqNQQFA/r38LT/HLsTvdJj/d0OJvfRZUhQKpu9P71VxoDxTNIpYZtX8m1o8M1Iqxs+QzPwIdQnSBoAp1fbfpiRZ4INx6ksnTgY5JQdeAXSbHaMANP/iE8HSejTlwkAtDHgxvctb/ZzA05n+GqKsIVMpEcWxA9mkRx0xdbjlV6FVbYef5XmfBd8q+BvLhoJ7vSS7z2M6q5Ds2sn4/fvZC0Anrd4ft90+iIDzYRfwLxIMRs4IWxHbjmgw3YXW4evKwlT8zbTeZxn/Mft2cSajbw6IiTf84B8sts3P7V1mPR6jWsPJjHsz/v5+VxHfE3/v3GlyVVDv49f4+XiAOw7Wgx9367nU+ndVcqcxT+lpyTb/PMmTPPyn7DwsLQaDTk5HhfXHNycoiKqm9ym5SUREpKCqNGjTq2rCb+XKvVcvDgQZo1a+b1mCeeeIIHH3zw2O+lpaXEx5/7vtW/E+mFlYRbjGjOZitTYRIERoP6xBe+MD8VKi6ACHKA8DZSppuzB2I6n++jueD4R38XnVaJ6m6I/ERpJ2w/DlLWiJCzZ55U49QVcqqKxPOm3VjxKLAW165rdbmIn1qjmEvuWyDl7Gte8/2c7a8GVNLqNfI1mX39/ZXa9bu/F5Fo4jdS3bN7jrePxK7vIbI9njHvYa0ox8etguDxoHOUs/2omond4xjYMoIAPy3Tf9xDiwh/xnSWcvhwi+GiLQe/2PhHfxcVzghBJj0PXNqCAS3CeH9VEvnlNno1CeHOwc1ICK29AT5aUOEl4tTlq41HmdanMXEn8cHKK7MzZ2u617J52zIZ1y2Oj6d2Z9HuLPLb30TE4epzZVAjEXBqqCgUo91VL9Yui+kihsYfD/UW2A0BMO5j+PVR8Q+zxEgLTmUhlXYnZaXWkx7v6XA2v4sulRa1owI6jPdO1VSpYOwHEjVeI1gBbPpIRBb/CFSbP5UEsLVv+N55u6tFqKkhcwdk7YBNf0hS4tUfQWCsGFBv+ghV+/Folzwu4lqbKyG+F6T5qGDWmVC5naj+eBc1oNv1DcOaDuVw31fqtevV8M6KQ0wf2YaNyYUs3JmJzelmSOsIIvUNp1sB4k13PEGNpErrFGgTY+HX+/vz2doUDFq1l4hTl6//OMoN/RqfUlVJXrmtnohTw6I9WTwyvNXfUsgpKLex4oCP9j9ge2ox+eV2RchR+FtykVnoe6PX6+nWrRvLly8/tsztdrN8+XL69OlTb/vWrVuze/duduzYcexn9OjRDBkyhB07dvi8+BkMBgICArx+FP4aqYWVhFvOcvtD/iGwxJ50M51GRYhRdWFEkIc2FxNAXz3lCv/s76LGKMbdDRHcRFqc3A5odQWUZsoMaWWBiDt1cdrEw6AiD/DAhndh1jWw4D5IXimD0P4PysA9upPMTh9Pn3ul5WDZdPEoCG7sLeLUkLtfPHM8Htj4Xv31OXtg33y0pqATvnyH1kx2qZWHLmuFCg8aFXx1c08+ndadcd1i6RgXSNNw/7MrDisc4x/9XVQ4Y4T6GxjeLorPb+zBvDv78tzY9jSPsKDXaI5tk1IgrdgGrZqrOsfy/FXt+c/odvRrHorV4cbqdJ30eSptThwu77t4u8vNgh1Z7M4owajT4A5rjafTZFlZlinn1BpcVWLwHt+rdlmfe+CXB+tXSdpKYelT0PsuuPwVaUENbgzGQI6UQqXj5Md7OpzN72K5OgCPtUSEqFZ12sKaXSKG83VFnBo2vg+N+shEgs5PEqqOJ6yFxIcfqNPOFty4tuLJXgGLHhd/nubDoPMUmRRo3F/M8Q8ugkGP1hdM1FqZVNj4vtdiTfIKIvPW0zrKd4XQ/qwyNiQXsuJALq+M78i1vRJIL6ok23qScaruuDYlvT+u8V+QRwilVY4TPxbQazQ0j7Dw9Kg2PiPLa7A63FTZfX9u8sttZJdYKbPK8+WX2XxuB5Jj0NDzVDmcZJdUkVtmxe3DfPlCp8za8PsH4p2joPB35JzLsl26dPFpcqxSqTAajTRv3pwbbriBIUOGnNL+HnzwQaZNm0b37t3p2bMnb775JhUVFdx4440ATJ06ldjYWF588UWMRiPt27f3enxQUBBAveUKZ4/UwkoiLKfe73va2MtlJqzxqUXLh5tUpJVeABcurQFCmolpbM9bz/fRKFxIaDTQ7UbY9KG0WR1P9xvFcLMyH4Y+LQPlZkMlDWrwEzL4PbxcTCabDZEKGVsZfHqpCDsg1WBbPpN2rEnfinlkylppgXK7xPBSa5DHh7UQMSh3P/T/V3XUbAMYAvHs/JaGJBbVts+xdL4eT2x3VBlb6q13NhtOgSeApuFVpBdWctes7fx8b38SlOobBYW/BUEnMGFtFm6mZaQ/00e25cftGby2LBGDVs2oTjHc1K8JJr2mwcfWYDZoMWjV2JzeEzZfbTzK1D6NmNwrAXPZDlQhTavPlSug5XDYOEPOj1m7IGUddJxY3Yb6i7QWNdR2mp8o58+1b0hVisuOp9cdzDlo587Ykx/vhYLNrYKQ5pC2ATpfB71uhz0/SkXMj7c3/MDDy6Wq+LspYrp/y3LYO1+uTwm9RQD58fba1ipjkHgSFaXU7qMkTYSa1PXQdCB8fgXctFT+rSqSapxRb0HhEcjehTu
0FapGvVGte9O7eqiaoJ2fMKn9m/wnu75hdesofzKLq9iRVkxaYSUzVhymzOakT0wUNzYejCZlVb3HuBP64QxpiWvYS+iyd+CI7oajyVCeWlnMykOraBcdwKMjWtMy0oLZcOJbLaNOS/PwBmtSMes1+B33Oc8vs7EqMbe6ms1O98bBPDisJbFBDXvgaNQqLMdV47jcHo4WVPDuisOsPJiL2aBlap9GjOkcS+Rp+PKcbwL9dKhU9U3Sa1CqcRT+rpzzipwRI0aQnJyM2WxmyJAhDBkyBH9/f5KSkujRowdZWVlceuml/PTTT6e0v4kTJ/K///2Pp59+ms6dO7Njxw4WL158zAA5NTWVrKyss/mSFE4Dj8dDakElkQFn0x/nsPwbePKKHBAhJ7XszM6S/WnCW0HaxvN9FAoXIkHxMPk78bmpQeeH6/JXoeiozGa6XbD8P9IWFRADV/xPbibm3yWzzE6rzFqW58Kql2pFnLoUJMmNy/JnJb1ly2fi+VBVIFGx824VDxxTCHSaDDqzPNfEr+UmaOLXYlwZ1UH2p9Wjsp3AgNNegc3ppmjkh7hju3utcjUayNG+z7Ex08klbSK5+oMNdIoPJCrw4hlgKigo/HniQ0z8Z3Q77vl2Gz9uz6Cwwk5WiZWPfk/m1SUHTyn9NNxi4LpeCT7XbUwuwM9ehGXFE7Dy+epzZYZUfUz4UsSE9e9A33tEoLCWQpcp9Q3jj8dlF9FHpRLBovO1NI4IOrtjnzNMiKcQ1aGlYuTsHylilqFacLCfoO3IViZiTVkWzLwCtCaI7gDdb5HK0fl3QmWhbBsYL21UK1+ovx+3U6pKIzvIBEVxKlz1nvxdRrwEyaskhtxegSsgHpXGIIb7E7+WZKvYrrX7spfj78M+TaWCaX2bcLQ6inzBzkwmV39W3liTx6E+L+FK6C8ba43Q/wE8UxfgHvo0uYUlLNcM5If4J/nCMZSObx3kp105lFY52ZBcyNj31rM+qQBPQ+pCHeJDTDQK9d06dVP/Jl7hIMWVdl74dT8Pz9lFUl4FJVUOlu/PZfS768ivsHF5+/rWEgBXdY4lzN/785eSX8God9Yyb3sGRZUO0ouqeOHXA9z19VZyS323el2IhPrruaJ9tM91fZuFEmpWQhAU/p6c84qc/Px8HnroIZ566imv5c899xxHjx5l6dKl/N///R/PPvssY8aMOaV93nPPPdxzzz0+161ateqEj/38889P6TkUzgxFlQ7KbE6izqbSn58oF9xT7FOOMKlYnXYBtFaBJFftXyDVDpbIk2+v8M9B5wdNh1A0dSVVhRngcVGkDmXWXit9Y7V0mjQIC5UEhESg2jlbZior8mSQ7HaCSgM6oxiA6y1SedMQB36GtqNh9cu1y3L2yL96fxGC5t4kVUItLpPZ0Xm31Uax+kfKQHvrTDzF6dD6SlS7vvP5VJ5ml7Axy0VoeCSH+rxPtLYcnb0YuyGYfSV6ggihdbSGX3dl0qdpCP+7plO9waiCgsLfk0A/HT/tyKS0qn7rxIHsMnanlxB9kiQeo07DnYObYXW4+W5LGq7q1pHujYN5bXwnDOVHxbAdRKA5tEx+klbAmPfwGIPA44IrXkW16iXQjxAfF7XGd4WkMUhMf+ffIYa4ncQbrH+zsAYrBi5ENLYSmRgY+bq0fqf8LpXOh5ZIvHjiYt8PbDKo1k8otBmoPHJ9UKmg6w1S8emohMAEqepc8kRt0mgNOpO8v00GS6t8oz4iANU1kW5/tVSE/ngHuvJc6HWnVPAcXS8TG8NfgL3zIHEJjpYjiY+JJdy/iLxymcAINul48oo2/LAljdsHNUOjVqHBxRWt/GkS1IbnFicyflYqjw96jksHaYgIMKL67f9QrX0DLRAHxCT0xXrle/R5/7jjr2b6/N10iO1/0smHyAAjX97Uk3u/3X7MdFmrVjGldwLT+jRGr62tyMkptTFve0a9fbjcHp6av5dPpnWntMrBuqQCoDZa/dERrbyqgypsTl5bdpAKH21bW1OLScwtP620rPOJxajj6VFt8eBh0Z7sY9+zIa3CeWFsB4IVIUfhb8o5F3K+//57tm7dWm/5pEmT6NatGx9//DGTJ0/m9ddfP9eHpnAOOFogsx6RZ3NGPS9RqnFUp1ZwFm5Sk1/lxOr0YNSeZ4+N8Nbyb8YWaD3y/B6LwgWHAzWPLStg2f4SVIDbI7PC31SfUiP8Day7XYvOVgb75sOeubJCpQY8Uncc1RHGfiitAQ3NKhsDof14byGnhv4PwPYvIbSFpLSUZsCiR723Kc+R0vlJ3+Ixh1Gu8icgqkN9TwWtEYY8SRN1DFanm4CoGGwuD3lWB0athhZBanRaFWqVipEdY5nat8kJ2zAUFBT+XhRX2vltX06D63/Yms7Q1hFoNSe+3odbjPx7ZBtuH9SU4koHZoOWULOeYLMel9oMGl1tq08N2btg1gTcV30IATFo5t8hXmW7vpdzX49bJVXpeAY8BNu+lIrHdW9JkIG1FHPT8RRVOogMuDjaq9xqHRp9dcWlrRRSN0qa1LKnJbEqeZVUedYlsr2ETJRmiHBzyX/ET6ftGElK3DpTfsZ9Cts+B3NEfREH5DqTsh6aDgK/EPjueig6Urve44bdP4iPTtur5Pgq80T06XytVPosegTGvAcZ2yluez2hRjPf3NKLUqsDh8tNUYWDT9YeYVtqEaWVlcwYEYhl3yw0a7bTMawVV99xM/utIVS4dWBx41nxCOrjotjVqevx++kWfrj2PYZ9erjey8gptVFcZT+lKtJGoWY+v7EHBeV2qhwuAv10hPsbMB3XmrU5pbDBfRzMKcPhcvPedV3Jr7BTbnUS6KcjzN9Qz+S4tLqSpyF+2p5B/+anNiF6IRAZYOTlcR15ZHgryqxO/A1aQv0NBCpJlgp/Y865kGM0Glm/fj3Nmzf3Wr5+/XqMRjnRud3uY/9X+HuRWigz9pFn0yMnP1EGE6dIZHUEeXqZm+bB53mAZQ6Xiom0TYqQ8w+i3Oogr9zOxqQCrE4XfZuFEmEx1ptFUqvApNfg8UhS6/HodWrsuiB0bUbBN3VMjj11Ks6yd8nscvebYcUzvg+o5WVSlTPxK2mrKkyGsFYyu1xwGDK3w1UfyMD+0FLf+3DZIW0j9hZX8uaWEh4Z+wWGnV+g3vE12MvxNLsEz+AnefJ3Gz/sWMO7k7uwdH8Ot/Rvws87M2ka7s9zv+xn1cODiT5B37+CgsLfF5VKheEEscv+Bi3qU2ivAvHKMRu0NAr1Xq6xhEP7CbDzGx8HoMYV2wvdwrsgYzP0uRtmTYSI1mLYG9pcxJzioyLY9K6uGkleWbuP1S+jmvAlYZRQoWrYC+VCw64PRtfrThGuWl4G/hHw68PiTZOxFSbNkqSq5JVSqdnleqnY+fF28cIZMh22fwV7foAr35J2221fVgtmHvn/oMfgyjfgjw8lbTS0hVxnYrtJK6/WJC1adUWcuuycJdciRwUsf0YqoY6sFh+3UW/jObCIyht+40iZmVCgzOZg/AcbvHbRKtLCiz2sBH0+6piYp05Zi2HrTBJGze
SSJdjvt93zwCVG7v+TjtEEmA04scv4fXcxvjDKuZkejVuzPd68jvLHy8Q4BGo1iYFKQ/dLGpAh4c3MjQIqJMiVXp/NwVH0csQqR3UXJVrhWh6rskicQSwDtcIHIECPg3wcsfqNASSDrk/v7IxsDJ+YyOXjTcETKQm8BxqQbDORpdtSuVPfb7Y9oDjcfw+0gVBMzu40ikRJGnizLQ83EpfItUJm5CDR5k+FVg6paXHw2HV05mLWNH4iHWFn2+Z9w3AJxZTKXTyOVMiTJq6feXl0hPwehmvL3ZRNdmVFA1hlLEduT389h1VEn9VV0WXBMY+DvHZU16xo+PWkEyJvMKm18Zl4CIhoySt6P1c8CaZ12JOoDjtmufI9Gn9Oc6dH453EKTzOaeTAV0/4xKbQECBAgoR5Q7kTN48GC0bdsWt27dQoMGDvPVLl26YMCAAeV9OAL+JTBr0iE9s8j9nVYzF/KgqpQ+1+gFSGWALtuRTnBlE9D9U+DyRvf7aDaB8l+DBugxlakI7pQCIjEl2b+6IVsM+Ux3GDCdSp2WT1KJo0kh0WQxkTDxieDxuoNvNH1+vMNp9qfP44XumcUc/7KjWldKfgGObvX5wdVk2Q6LmUVTgxGMB7UWeY4YNMC2DzgaVqGF+2MRIOAOcCOjAPWj/e/+gdp0j/44ABCqEkEsAuLzrGgdVYYD84lgeo0AAQL+HVAF0NNmwVCOJ4fUIHkiUxYlP+5lWtWer9yTKqcXAqNXA2kXqEgFGCDgE0HvnTbPk3y4TeIU4cZOJlWdnOP+uJqMARTeRf4wD9HFvtXMOmf8ZjauWj8PyLzopXdyHrdJPUvSxE6UNHwc2PASkHQYCK3D+uPYzKLxNR+ep9wkEkPHZtEPUOoF1OrN9+XCKtZCCh/WLQ2G8TZ3aDaJRE38Hja/IhoAXd9n0tier/i+9fgcuLQe6Pg6sHQciRsvP5pRGzTu9xu3ByjMIpFj0jnqIncozAKGL6YHowABAgSUM8p1tMpkMkEqlSIzMxONGjWC2Elq37x5c9SsWbM8D0fAvwgWs9HV8K44CrMdnZ2GIzhmtf97FgoyFROhNCksGoqjWjd2hv7sRdO93zrwdnUoR6acEdMRSDjo+TgSDzJuXKrgPPaWt4GLqxl3fm07fW2aTfT8+PavsGBY8yy9cpaOAZaNZ1HU4XVuU70nI1a9w/haR6+h7NgOm41FrM3GoqzRKBoBuitWLq4hWSRAQBlgNFuRnKu7e6NjgNHjSvdjVQAgFYsQphLhRl4Zk6t8woEsQZEjQMADC6v17lUsITWB0avY2Fg4jGvkwqH8vUIzBgc4q0qK49ZpkhBhddj8Ca3FVEtdDn+/dbrkY4xaKnraPF/yvujmHJfW5QJS1d29ln8ahTkkSxYOB/7oyXO5aCSJl15OiuOLa4CqXfj/8LpU6thsVC4nH3eYDBvyWfsUZgI7PmFzacRSYNxmYOPrHDu3+8wY8qkACoihGqo4YjswjvziaoeC2f7eWE0kzlJO8Jhr9wPSLjG9s9FoEj4mXemv3f53p/AufWQqqgkQ2x7wugdrCJMeyEkggZgTDxj/4tgECBAgoAjlqsiRyWSoWLEiLJYyJo0IEOABRrESiuBqQOZV9xtENGD3BwBsVi7iqWc4Fz14JiPIc5OAhqOAphNonmcqBOoMZNd+5ROOfVnN9NpRBQFDF9DnxlRITxuZF+fw/wobX6NJYr0hgLEAOPAjUO8xxpx3+4hjVrs+47HK1UDt/kCdQSSjNrzsanJs1gN7vgb6/cx0DGM+i6c6A4CMy5SBt3yGs9wFaVTvpBxnB63BMBogO0erF0fiQRZnAgTcJRKzC2G1lSGxCqAipzhRWgzhahFulDW5yieCxuA2W0k/BwECBPxz0Odz3On4H/Syq/oIU6P8K/L+/DSuexIZVXuFWQBsbLhYDIyZTr/o2J/NRhWJTM1mSGmQKQGTlkRCmxeAAz+57scT9k/j6HTt/kyxNOTTE8a/EpB6jiTQw+ahIvMCZvd2HfW2j2N3eovj5gn7WadABHR8i9vEdqKPn83G4Alnf5ms6zR/Lkgn0XJmMTBuE1VNxWExAnP68bw2HEE1lNlAYsaQTw/E4ri+k8ohs4EK6cSDwOFfgR6fsckWGAPEdABCqnt+3epgQJ/LY/UOZ/2058uS20U14fjZvSA/FTi9mOEbMiX/VqRKoN2LpSeACRAgQAD+gdGqt99+G2+99Rbmzp2LwMBSZooFCLgDZGuNSNPoodF5oV7nqVAteazkRlGNOUZll9GKJY77bh5jtym4Ok2RL68Hen3NQuHEHM5L20eUiuPIryxk7AkNdQeR+CmNyIluyovGNi+wy5d9g4RQk3FUzXR5lwqYCi2A8VtJyshUTKja8hYvbHt9TfXO8T9c933wZ6DLB1T1nF1KlZEdcbuBao8A8wY4Ok1Xt7L4HLG05Py4MxTenl+PAAGlID6Tndhwv7v1yLHx7zeqSalbhavFuJhdRiLHO5wXg/mpgreBAAEPCoxa4PwK+pTYcXULsPtzYMJWEjRb3y1aOwOpGAmMBVY/DcR25giNM4njjHPLGEUd095zuEHFNiQZtr3PBCbnMWeblYoed40PkYgkx9oXgU5vkMDZ81WR+kPLRKXSiKAHEVlXPfv1HfmdY0wJ+xlPXvUR1hZiMX1zjv/Jc1KzD9DyKRoaF2bRB9DZX1Cm5HiWJxSkAtnXeD6rdmUza/Uz7hPCAJJlIhFwaj6VLlW7AjUfpSp7/YuO7dq/wmO7tLbkPtq+COz4mPXhqJUc07PZgMzLJOekCo64RzWhn2BZYSgA0i8BsDIFTJfDmrJ2P+DoTKD1FI9m/wIECBAA/AOpVT/99BP27NmDyMhI1KhRA40bN3b5ESDgTpGer8fbK89i8tzjqOQNyEKrwTpiKaXVAAmQZpOA/tOBm8fZvRNLAZ9oh8mwHZlXHJ2lwkxe3OXdZKSoJ2gzuW32Df7s+YodIrGMz1scMiXwyCc0DwQcRqsGDWXcV7dyfGtWd2DVk0DODV5o/tGDBWjGJeDyBkZrBlUhMeOMrGssGDe+BvT8wvW+ZuOBFRNLytQtJt7e+W33r1EkFjxyBJQZcZlaeMnECFC5Nyz2CF0e/zb/ooiN8BbhpsYGk6UMF0j2bmdO/N0/VoAAAXeEAoMJaXl6ZGvvMHq7IB1Y90LJ20NqUkG3eKRj7SzM5ljwpfVUYaQcL50UsJpJELd53mH+rwoiEST1Ajq9A6Sd5fiyTziQfIwmugAv2kUiKlEk8pL7bvkMCajUUxzpWjaOhMDxPwC/imwgGR6yMeW0857v02bQxDm4Or2IFjxGVcmiEUy5yr5BRcv+aWxa9Z7GRtdtBU8R7N45nmBPEctLAq5s5li8fYzLHRqMoIon4zJVXcdmAismlTTO3zcNqN0X6Pim428hMBYY8BtgMnC8zmLk+2g10ycxuBoTu5aM5r/XthapwcoIbTpw8Af6EWZcLjJWXslzWKHZve1bgAAB/wmUuyKnf//+5f2UAv6FsNls2HYhDT3qhuPVdsHwP/otZOcWApN20Ez
PagYkCqpj1r/CLtATu/ngm8eAVlNoZHdiDiXXAAu5Hl8Au6YCIbWYcBFY2fNBqENKmuXlxHEhbjIGqNyGXRVtBpMamk9iwXl9J8eklAHswLR9ETiziEUKwIKjRi8WDQuGuBY9duz8FBg0wzX+PLgax0Ue+YgmxoGxjoJXGeg+ghMgIaUMZLFkN3i0o88PvF2AgDIgLkuLCD8lRHc7uqRN57+lmB0DQIRaDLMNuFlgRYyfpNRtS8Cn6O86Jw6o1OruHitAgIBSoTNZcCOjAN9tvYJTSbkI8/XClE5V0TwmEEHeCs8PvHnE/ZrXZAyw6U33j7m4BhixBDj4U+kJixIZiYHNbzNgQCTm6JY2E7bophB5+bEmODWfSUxGHdDiSWDre4yqXv8ySYvhi4CzS4CUUyR8Go7gWPdBpzGsjEv0ilGHsJawWQC9BkDMnZy+BwP2ppg7eIeRBOv5Bckz30jg/Cr3xHjWdTbG6g8pOQ5lNnC83C+a2xRHlS4ARLwvrDZQvRvrlbg9JZVXLZ4E4vdSge2MwiwqiGr15dg8UJRo+gTQ8W1g3EZ6snn50GdHIgWGLeRY1v7vqYze+TmJOjvyb1EZ1ONz+hp6SFeEzcamhNQN+Zef5pqmevuc6DnSV7whJ0CAAAHFUO5Ezvvvv1/eTyngX4ik7EJEB6iw6Ggi2lU6Da8TvwNP7GH05I3djg3l3vSO8YvmQu48jiSWcqGs0IoXjpXbAvu+ZXGQeRV47E/O1KuD3UeNN3+CkeLFcWUTzfZ0eUCf72jYqEkBCnMd40tmHbt6YgkQ1RTYXTR/HdEA6PIeCaC8m+zQuINJxwJIInOYA7Z4ivLygjR2lfp8z2SJC6upTioVNmDiduDGLh6/f0WaIPtXePjm+gU8MLiRoUWoTykXbZ5gHwtU+pe6WbiaBFF8XhmIHKkXL0QERY4AAfcdJxNz8PiMw7AWieUyC4x4av4JTGwbg+e6VIOv0sOFrycTWqlX6QqFnHg2R/Jucowp7VzJbeoOYsiA1ItNlFVP3TbzFwFAheZsXhz5jaqL8PpA3x9pkGsPRdBmAElHOGLT7VP6vGx5x/1afXEN0OsbwGykIre0QIYHEf4V+R3p7ry3fg44s4R+N0PmciToyibP+zq/AqjU2j1Jd2YpUzgXj3T1/wuvB3R+F5jT1zV0oekEoM+P9Dm8sQuQq6jEKUj3nER4ZQO9B+1EDkCS5dZJoEZPjpFtesP1Pao7COj9Hd8/ZxLHGTunAjV7s1ZyhlFLkvD4bDYLYjuyQedf0eHJdm1rid3dRtxuEo33CTab7e4bKgIECHjgUe5EDgDk5uZi2bJluH79Ol599VUEBgbixIkTCAsLQ1RUWXJkBfyXkKbRw2CxQiwC3usYhMCl31ESfWyWK4kDsNNzdglQq19JTxmrmR22J3ZzlnrpGHpljF7N4kUsZfE2cjnHj+xGyhI5OzBevkDCgZIHKJECtQdQoj1/CNDxDWDDqzyWQbM4xhUQQ/JIHUL5L8C5664fAItHcduGboz8XFC0KMtUlIrnxDsUOMvHM63KZuHr8Ymk0seQX3I3MiW7a/4VgMajgIbD+doFCLhHxGdq0TK2DF5o2gySlH9BQAYqRZCLgbg8KzqV5QC9wxidK0CAgPuG9Hw93lpx9jaJ44wZ++IwokVFz0ROhZbub/+rNcnLjyTQ7i+Ax+bQ4yTpMO8TiXlRXr0HsHwiMGoVsGh4yfUw6Qi95tq9QmVu6hkSQl6+jhErgBf755ZTnXNhtYPEkakYWhDbkRfr+WlUjvhFARrJX5q3P3AQy9jQWveiw5NGIgeajqcXzonZvE0kIkEjLmWEViJjgpe725uOY4pV1w+ptsm+zqaWXA1seovNLVUwlTYn53JcKqIBcHY5EFCRyaKJBxkQMWKJ59fiLpmzSlfWSRteLulhdG45/QN1uZ5fl0FTRDI5ETkmHXB5I+tG+z6vbOLf5rhNQKjT+L8nSGTuR/juEmkaPc6naLDq5E14e8kwtGkFVAxUIUB97/sWIEDAP49yv1o7c+YMunbtCj8/P8THx2PSpEkIDAzEihUrkJiYiDlz5pT3IQl4iJCrNcJi0CLKlgOVOQvBUj8WZ7EdGI/pDk3GAds/8rzTYzOBBiOB4Qu4sO7+Ari0jkRP1a7AIx8zncpiom+NTAVYjcACN1HlITWA7p9Rdiv3Btq+xG6OMpCL/Z4vge6fc5G32RhFbh8fqd2fCh9jAX/X53FUKv9WyeeRqxmLOmgmi9RTC+ifY4e5aMb71hmmVo1eww7WsrEl9/XIR4DaaXxKIHEE3AfojBakavQIL3P0eMBfpkmJRSKEqUWIyy1jBLl3mKDIESDgPkOjMyM+q9Dj/WeT8xAb4sFE3yeMCpiTxWrBnASqZuP3lXyMwpcqG/uoc/oF7uPRb6mGkMgAmwg4+SfX9MIM900NgClKE7dR5ZF4iE2X3V/S0Nd5/Fgk4ghz72lU3Cr9uc6fXULiw6ABqnfnuM+JuUBUI9hCa+Gh0kQYC2hSPHQePWrsI0LXtrPGGDYfEEn52rWZQJ3+VCjZoQxgqlNhFuub7BvA4ytIlmmSqbhpMIxjRDd28mfoPI5ihdTiua7Zi+NNuYmAbxTPZ52BJHS6f0YzZOf6x6BxVSrbUWdgScVQeH2mV51d5tmI+tgsjuGVBmkx1WlBOtVexfepywHWPAuMWEyj7pqPknB0hzoD73msPTVPj4mzj+JcisMCYMHhRIxvE4NnO1cVyBwBAv4FKPcrtpdeegljx47Fl19+CR8fh8FZr169MGLEXykQBPyXkaM1QpNxE9Fnvofk1DyorGaSGDV7s7gweSgc5d7uyRA7NClcYINiSXrYCzWFD1C9J2fdc5M4YhVSE9jyNtB4NFCxFbtAdsR25LjV8okOlY1vFFOmIhuyoEw7T+WLQcPZ+vxUjlcpA5hotedrx/5OzqXfz/7vgaRDlOra0fUDFgtLRnt+Xfm3uN/MK+w2DV8EjF3PzlfmZSCwCqXGkQ056pV9jX5BJh0LjICYe0tkEPCfRmI2P4/hvnebWAUqcu4wrSNcLUZc3j0QOe5ibwUIEFBmiP+CrfCSljIGqQxgemNMOxrb+kQA7V4ikdP1A2DJGJIAdki9gL4/0GAXKEo28gZWPc3UqcWPFx2UlOPGUU2B6zs8P7/FSIJGGQiE16XSpMWTrAF6fEbjXLEUGDqfxMSSMfSn6/EZELeXF/V9vmcdsf1DIH4/yYmMy/gH8kXuDYVZjhh4ZQCw9xtGdz/yIXB4Ol+fzcZmVIfXqIy5uJaKmkc+ok9h5mWOE0U0BNY+T8Kn5qP8js+6Rs8cewiDTMW6o4qE6uj4PXxOOzTJTCxr+RTNp5MOcazNGd5hJUmcoKr0WNr/A1XQMiWPL6QGR97zkjyfg4JUvqeeRuwrteV9zkg753mM7uYRKotUgYA6FOjwOpuHzvCNoqm2/K9G4j3DYrVh2fEkFxLHjln749C3YYRA5AgQ8C9AuRM5R48exa+//lri9qioKKSmprp5hAABRKE2DxEnvobkzHzHjTYr59DrPc
ZiITex5APzkmg2eHWz+x3X7E0PnatbHCSORE4z4Z2fucZyS72Agb8D+lwutFnXGVkuknCEamY31wVckwwseZwkypXNJEwM+SSJen3NTmJ+Gjs+KadYgBZmAp3fAwJj+NpCagKtnqFyIPEQZcgn5rCAcjY0Lo7weuw0ASSQDBp2NIcvJFkjVQKqACZ/HPwJ2OtEIu2fxjjRfj87TGEFCLgLxN2OHi+DIkeb/pdGx3aEq0U4kVbWCPKiDrtJx+JegAAB94wAlRyNKvrjZGJuiftkEhFqRfqWvgPvUBrjVu1KAmTeAH5G/aKBbh/TNDj7BtMbo5uRLIlu5hgxXvs896PP5VpuMfJ+gEpbv1JG+O1mycdnMRih4Qg2aK5tp/r3qYNUy15cy/33/YHEwe+dXFMho5uyVlgymttWbg+bSAKDyQIv2V36ef1TCCwyZj42i6reOgPZ2Do6E/CNBkYsBY7+ztpm81usa9q9xPuWT2ATyQ6FDzBgOrB3Gkm6I78WkVtFkHrxfpmKdYnF5Goe7YyjM4BRqxkS4YzIRoB3OPdzYQ3/ZuoN5t/NqQWMna/dj1Hmx2cxkSqkFlVB51e6f67IJhy5GzofmD/YVcnlXxHo9xNJrsJsvv8KX88+T3ZYzdzm5mG+ziFzWevpc4HYTvTsCahc+j7+ApkFBsw/7KYeLsLCI0loEO0v+OYIEPCQo9yJHIVCAY2mJEN85coVhIQI3X8BJZGtNSBPZ0KYNRfyc4vcb7T3G/rkrHzC9XapF6DwY7fo+raSM9I9vmDX48oGV3+dugOBcytcSRyAaQIrJgGTdgL/a8XFNrYjzRWP/1myC+NfiZ0giYKz5td3cqb+2Cz+hNXheNSOqUDrZ0lMyZSMWd3mZAx+6Gea+dUfQhVPi8lAymlGZ65wE3UeGEtyyW4aC+B2N9Bi4vPYjzX7hiuJY8e1rcCVjUCTsfzdkM8CViSmh1BxObEAAU6Iz9JCKZPA16sMy4w2g5+dO0C4WoyUAjMMFhsUkrssSu0kZW4SJfYC/hnYbCTUrGZ+ZxfvcAt4qBCgluPzgfXx2K8HoNGZXe77fGB9hJRmgG7IJ1EjEgEQ07vOfmGcdxNYNp4X5lW70nB4w+ts1si8mDblfKEdt5vrZmx7hhhYrfSPaz6ZjY7iUeUVmtOnxZjPesJqodfdvIFA47FcUwuzqBS6to3raM3ePMbia//NYyQ4avam6q/pOEAsgcWdcdCDCq8A1je6XHoEJR/nuu8XzWbS/u+oPjIbaDp84Eeg+1SOCzmTOADfl9VTOO626ilg4G/8vMfvJ3kWUoM+OH/0oEJ69GpXYswZ9hrG2bg4oiGNkbOuMmWsziA2owIr0QNRJAYOfA9U6QRbdHOIDk/n49IvkBBUhxSrl8C/wXYv0tT58P+A8Vu4/6zrJI1CavD76uJaqqkLUvk31OENz+c0IIZNCk0K1WJWC0muql3ZWDi9iOP4bZ6jWruMsNpsKDR6bnBodCZYbTZIBCJHgICHGuVO5PTt2xcfffQRliyhIZlIJEJiYiJef/11DBo0qLwPR8ADjqTsQvyxPw6LjyZhxwh/qNyZ1QFcjP0rskg4+BMJj8jG9JKxWSmJHb4E2PS6w7Svy/ssynRZQExHIKIRyZRr24DK7dwTJADJnKTDVMpk3yCB0/q5kikZTScAFVtw/nvHxzQcbv08ULE5cHI+YMijUuboTKDJaCBhH1BnAM0Rnf1u7Di9AKjcGtjxKSM5g6oBFVsC/f9HiXN+KouVat0pTV8w1PHYiq04YnZhNf2Csq6R7On0lufYTIDnsnpPSoG3fQhc20IlT6PHea78oj0/VsB/GvGZWkT4ed19x89i5IXDHY9WiWADkKSxomrAXRa+3uH8NzdBIHL+KRSk80Jo7zdAfgpJ8Uc+4ujEHf4NCHjwUD3MG+ufbYdN51Kx/1omKgapMKJFRVQIUEHpTpFiMXMkZ/dXwIVVHJduMBzo/wsv/O2GwiIRGwon5gAtn+aFdvYNjla3eobkybGZJAcV/kDLESQQ4vdS+ZpxmevlwN+Z6nh5Pbft/z9eyC+fwLHk8PpUxkpVwMAZHHHeP42KiwZDOS61+ysqVDyN0Zxdyv1qbpKE8qsAdcBD5EFntdADyG7ufGwW1ZKRjYDun9Lrb/3LwOA/SORkXiEp4ymNSZfDmmz4Er6PxgKOdnuH02fHVMh9KQOAv3ITkki5rT6X3xOZV2kuPGwh0GQ8ibq9X3H8ShnImqX9a0DmZYj2fee6ry3vUsWz7zuHB1NADNDxdaqgVYGMJV8+HhizjqoegH+Huz4HDv3i2NeFNVQkNZtEtZIzRGKmYPmEk/Sy17OGfFdFkCaZf2POBtt3CT+lDF1qhmLFyWS39w9oFAWJ+CEb9RMgQEAJlPuK8s0332Dw4MEIDQ2FTqdDhw4dkJqailatWuHTTz8t0z5//vlnfPXVV0hNTUWDBg3w448/onlzN+74xbBo0SIMHz4c/fr1w6pVq8r03AL+PtzK1eGT9RdwPkWD8W1j4O9fbAOfcHbWKrWhf4xBA1Ttxg7S2ue4wNpRrRvQ/hWg70+O2G+Zihdw+75zxH+LJSRT/CuUHhWq13Be394pzLhIYuTmMf4e3QwIqw2scFIIaZKBTa9Rqv34MhYgVzYDpxdyZvzWKaoQTs2HR5xbwW2PzQKe2EUZ+bFZQOd3WGCKJSSCNr9NoiXxIDuSYgmQeYmFhE8EiZzsG/Tzafsi5cf2MSyX15lHwun3To7XaizgfPy1rcDotaXL1AX8ZxGXqUWYbxnGqgqLfAj+InrcDnsEeUJZiBx7Op1gePzPQJdHYvrEn47bUs8Ccwcw4a/OAKbjCHjoIBKJUCFQhUntYzGqVSXITXkQa28BSTepuPIO4xpuR24ilQ5VOgD1BwOp54B933KkqedXNIlt9zITf7QZvDi32YDlk4D080VPKub62nsasG8a1bCaZJI4gMM7RZ9HFU39YUWplTo2VJxHdW6dBlZN5kjNn49SPQJw3T78K5B4mGtn8jHPJ8FUyLqg8Tjg+B8Qdf3wPp3dcoJERu+8IzOAS2sdtyefAJaOJcmWepZNJJ+IInKm0LNxMMCmUOJhYN83HPM+u4yK6WXjqJoCqHJp/Rzfx3UvlNyHXwUg4wrrPKnCodzxr0ilS3RTYMEQR9R5YRaJuMptqWI+OtN1f9k32LhrPJaJZSIxyaTVTwGhtR2ES/pFGLS5yDarEerjBUlBuiuJY8fh/wF9f4Rt4EyI9n9LYjCyMeu04KKGQY7nsScUZrmPab8LqORSPNu5KjafT4W2mDKnZrgP6kf739P+BQgQ8GCg3IkcPz8/bN26Ffv27cOZM2dQUFCAxo0bo2vXrmXa3+LFi/HSSy9h+vTpaNGiBaZNm4bu3bvj8uXLCA0N9fi4+Ph4vPLKK2jXrl1ZX4qAvxm5OhNqR/iiS60w7LiYDrM4CPIqnSGK3wcM+I0dmeOz6W0T2xGo0YtxoQe+d8SO2nF1C4uSgMpMTFAFMj1hx8cs2OywWlhY1Bvi2XMHINGz4gmSR17+wGOzg
crtKcENiCka8/Kg6Dm1gBcoC4ez4/jIh9xPpTbs3NmTN9xBn0cCymblTPbeb4CbR/lTHJ3fIemyeISjgJV7s5OmDnZ0gA7+DAyZ457IqdIFuLHH/cx31nUa9/kN8Hy8Av6zSMgqRMvYoLt/oF3efoceOf5eIigkQHxZDI/FEnaQ7ebkAsoX2nRXEscZm98AKrWk8aeAhxpeunQSMc5KjcBYRkUHV+Nasmi4q2dKpTbA4FkkDDTJwIhFwLYPXEeOw+rQM2flE1Td2qxU2cR0pNGxOoiNDjukckeikUnHZKyYDhyvLu63AjD5asfHDhLHGbdOFXnvNPX8woOqUnUbXBVIDoeotLjpBxGGfI4lOZM4ztj9FdB6Crer9ghrMNhIguhy3D8muAZVKwBHiNq/Cswb5DrWZNYz4bPX10BsF+DGdsd9Cl+Oqa9/pWhbg+P2QTNZI237wD0REr+PjbzwesC1NNf7CrNJHMLG+jG2E+vBOgNJMgGARIaLaYUYv2of3uhRE4Nlx9zbV9tswOopSHz8IJJaz0LDSBW8VL6Qqv0d21TpCBz9zf05imhIMuseUTFIjbXPtsUPO65i24V0KGUSjGhREcOaVyibd50AAQIeOPxjGs+2bduibdu297yfb7/9FpMmTcK4cfyinT59OtavX49Zs2bhjTfcz6laLBaMHDkSH374Ifbu3Yvc3Nx7Pg4B9x8FehO0Rgu+3XIFK4YEQ71oAGXKj3xMAuP8CsfGiQeZ8jRkXkkSxzuMc++htVjExXYC9n7LYs6ZxHHG7s+BLh9QSlscEQ1ZpNgJF30ucH075b0TtpJMshodUnB3yI4jmXJyLos9VTBgNbHLVLmdozNVHDHtHaSN1MtzN9A3sig1o1jsurGA0agjFgOX1hcldBh5LkQi106aTAm0fQGY09/z6zi7jD4FQmS5ACc4osfL4KNUYCdy7mysxh5BnqApYwdTHUqPHAHlj4xLnu8rSKdiRyByHm4YtVyHio/bZN+g98yYdcDCoRyNcUbCfn4HNB5DrxuLiaazzkg7Tz+Uti8xiarJWI61mHVFF8lS2ORKx5DOhdVAk3HAkaILaKsF8PJlspI7RDQourj3gIT9HO8Kq1tytBqgYmf/NHq3NColYfJBhdXs/nXZkRNHFbF3GKC5RdVL1a5UvWx8reT2sR3ZHOv4JptrUgX/Dop709ix92uaLF/bCmReo5dNUBU+rufnrFnMJk5h2azAhldZI5Z2zNe2M/Xq+o6SZI/ChwTivu+AKp35d3V+5e1GlqHmICy/ZEC21ojvtl1Br+5SeJdy+nQGAx5fmAKpWIRFT7REU7XTnRENPTYLrd0+QZbFG7Z8PUK8FWU2JJaIRYgN8cbU/vXweg8TRBAh2FsOqURQOQoQ8G/BP3L1tX37dmzfvh3p6emwWl2/SGfNmuXhUSVhNBpx/PhxvPnmm7dvE4vF6Nq1Kw4ePOjxcR999BFCQ0MxYcIE7N27t9TnMBgMMBgchmvujJoF/D3wVcowc18cvns0EpGbJ5FY0GZSpupM4tih8HOMZdhRoSXw6DfA9g+A1dtIVEQ0BHp+4UiocofkEyR+Bv7OjlxuIjs5dQbQCHn5RNftTYUsALa+R1JpWCnjUQDTqUx6/v/wdG5/dAajUWPacba+eEfLOxSo1NoRySpTkkApPgLmEw60eQE4tdD9c9us7IhV6wZcWsfb/KKBho8DZxaTUKrSBej2CeXrpXnoqAJpAFkOED6LDw/uOXpc4VP6310xhKnuJYJcUOTcLe7bZ1HuU/r9d/E3IOABRUEGcHaJ+/s0KSTsipM4dlzZxLXxfK7npkvaOY4yazPphaPPpfK00eNAZCOIVEFcK32jePHe7uWi8Zrf+RhdLhsp7mA1c1/GAvf3q0Kowh34O0mHi2tIOAXEML3p5jGHh46pEFaZ+r4HkP+t66JYXDqhLhKRxJnZ1aGMSTzIUane3zFWOz+V57/BCKBKJ44idXkfGDqX5z/HQ+omwMeaCoFjf7BGSdjP9+Lx5fTkOTyddVJQFfoPVmrNBptddeUOEhlJpKHzgE1vOr77IxrQM9De/KrWDdj7Hb0LASCoKhIbvIAlc+IBALfy9EhT14S3WOqq2KrQAmjxJGwyFaIlhVg3uhK+P6zBu6vP4ecRjVExUEUixS8KGLMW2PQWQyVsViCgMszdv8DKW8GYOncPvL2kGNcmBo/WiyjbmHIRVAopVIoyXu4VZnPUy1hA82vv0HuKRRcgQMD9RbkTOR9++CE++ugjNG3aFBEREfcUfZeZmQmLxYKwMNd45LCwMFy65L7Tt2/fPsycOROnTp26o+f47LPP8OGHD9lc878AFqsNSTk6WKw2NAw0UXLd9yeaABf/m/EJ55hUTnzJCOFHv2F0qbM65tYpqkzGrPF8AGIJSSFtJkkRVRAQUIlky+JRJcmTtPMsJhOLCMTUs0wvKN5BBGj2F16fsvCE/SSmJHLO8bd8EjjyOyXlx/+kakYkpuql0ePAupcoIR7wK4uHekOBU3O538BYFkj6PHbHqncH6g1iYVI8ojw3gWbJANU7PuH0Iej4Jl+7XM2LaauV0atb3nF/npqML/l+/E0QPosPD+zR42G+ZVHk3Hn0uB1hahFOlTWCXB3KNBYBd4z79lkMquL5Qjm6Ob93BTw8yLtJb5v0C1zfwuqwMeBuNAnge59/y/P+bFZekDceDSwc5nm7gnQmO9rJBLuPW4Ph9NEbvphKVt9IEgKZN2iKm5/qiNiWqUgaOEOqYDT10Rnun7dGT2BWNyC6CUMABhZtp83g86cVefd4h8IWUAUWi/m+Ezl/67ooljEowR7hXhxVHyHZVjxdaveXQKW2jCfPSwICq3C7pWOBhiNZ06x9nr6FIbU8P789vS7rGn/EEmD8NqZ8Oo97ZV2nX06Pz/ie1hnIplRxiMRARH1g0Uig1gCShHZ/tMyrrK80ycCj38Em9QLq9Aeqd4MltB6S9AoMnZ8AlVyKx1tGQSETY0O8EU/1+hbSdc9xHzV7829i3YsQ6XPhDaCuTIUvO36K3zLq4kpaAUQiICa4SMcTUBkY+CvrTKsJ+TYVRi6Ox5mbrNdyCk34aO0FbD2fih+GN0KITzmPQ+Uk0Gg8YT9/l8hY87V/mQSeAAEC/nGUO5Ezffp0/Pnnnxg1alR5PzXy8/MxatQo/P777wgOvrN40zfffBMvvfTS7d81Gg0qVKjwdx3ifx5Wqw0JWVqsPZNye7GTWIuKBO9QkiXqEJrd+UVzIfSNAhaNYPHY9QNuc/Mo4yfj9rgfcTLreHtIDde5fDtq96cENzCWBQIAdHwDSLvgvqCJaMDZfDuOziDZsvJJV9mwRMZkra3vA/WH8jWlnqGyRiIDVj0D9PqKo1cVWpBE8Q4jqXR1O0edIhqQrDm9iIkdSQdYSPX8kouu8/OpQ5jGsPZ5Ftl2hNZiYSRX028ofj/TI7QZ7CL6RnH23ycCqDuYyp3EQ66vud0rPP/lBOGz+PAgoSh63E9ZBkWFNgNQ3l1aUZhKhFtaG0wWG2R3G0HuHVbUcdTy8yDg
L3HfPove4eyML3jMtYOuDgH6F3mZCXg4kH4JmP0oL0rt8A5lkyWwChOpisOgKT2ZRyLj51MdDJi0nrdzNry1QywFavWmp054Pa63hVlA0lF63Enk3O/1nfy+GTQTWDLKQTpJvQCRlMqMhAOsL5zR7VOOcVvNwOVNgMVAA+XiCKoK5CZC5BsFiUkP+Hr2biwL/tZ1USxl7dHra5oOO48i+UZxZP2PHu4fm7CPpIjNwhH1phPoh9R0PAARa6ysOKBpRxL3+tyS+2j1LBtXE3fw/HoFcDtPnj17vwWGzGU61c2jrg0skQjo8QVJn/GbeAwJB0g2XljDdKvQ2lRXZd+A6Nd2t/+mpCIxQpo/jy3jR8PbmAFLwhaYlcEwRTZHgaoX/J5sDNHxP5lmNaef63kyFSJg64sYO3Q99D4WeOUnwGoRQaz05/efwgdQ+CBfb8Lzi07izM28Ei/r4I1sxGVqy5fIKUjjyKPzmL/FBBz5FZB5AZ3e5udOgAAB/yjKncgxGo1o3br1fdlXcHAwJBIJ0tJcR2TS0tIQHh5eYvvr168jPj4effr0uX2bfbRLKpXi8uXLqFKlistjFAoFFArhy6q8cDktH59vuIi+DaNQJUQNpUwCrcSPRYBUwXnlzCssCJo/QbO8w9MdRda+71iQ7f2GZnL20SF32PERLyKWjaeCxo4aPSm/ntEV6PcTyYz8W4wM7/KuaxIVQFIppj07yJENmUKlzeS8du/v2JG6dZbd5+hmJJCqd2cR2HAkYOjD2f1hC6jgub6DPjkRDfiaNSnAzk/ZTTqfxuJEGUCiyGICHpvLInf10yVnzbUZwKY3mABhn1mXegF1BwE3T/Ac7viEMvBl40kq2SH1AkYuASq0Ah6bw/N+fiULj3qDSaQpA8r6Vt81hM/iw4P4rEKElyV6HKAB7l0ShGFqMSw2ILnAhsp+d0vkhPDf3CQm4gj4S9y3z6JUxnGIZ44AlzbwO6ZyW6oA/AWS9qFBfhpN9Z1JHIBr3fIJQI/PSdYVR1hdXix7Uq82Gk0S6MYujjW7M+SP6cDxj+Ko3Q+4toMqV2UA10f7Ot/xDRr1H3GKh67alQbJ8XuYKFSpJaBJYqpaj6m8OE84SE+d2I58zfb6Iukw0O8XoFZfEhinF5NwCKvDBsuy8cDQeRCnXShK7Aq5g5N6Z/hb10WJDLi4lgreEYtJfBSkcTzdO4xEjb6IePAJZwqYXzRrltMLqYyK388o7ootWSttfI31XGRj4PQCYM/X3PfyiayVACpvmk4AgmKBHxvztsjGVNzoSxkd02YAEhlsxgKIOr1FE+abR6nsq9GL3/GXN/C9PDydHjW+UayHwmoD2uwitdCzrn6BNiu8D38HdXh1iPZ84VDxSGTI6j0LyVFtEd1tKqyb34TYncmyXwWEqsTAtskQ2RPUvMOA7p/x707ph3y9Gbsue/AKArDhbCqax5SjQjEv2bNX45HfgKYTgYCK5Xc8AgQIcItyJ3ImTpyIBQsW4N13373nfcnlcjRp0gTbt29H//79AZCY2b59O6ZMmVJi+5o1a+Ls2bMut73zzjvIz8/H999/L3T3/2Fk5htwLD4bbaqF4LONF9GxRgg2TqwBX4kJtqjGEM3p59g4/xbVNn2+54Jjhz6PRVPrKUBsZ0ccuCekXwYajaL6RJvJwiX5JHBiLjBqFdMP+v3EYi7lBIvNHp9z9luXQ1VKcHUaIuYkUOny6HeUEZ+aDyx+nB25dq+yKInfywJHn0eyRuHLLtCKSTRejGrCQm//d5T6TtpJj57iqVS6HKpshi3g6x08y7PPQOZVx0VRQAyLUrkvi6jUMySTzi5xJXEAJkcsGAo8fZhjZT5h9O8RIOAvEJ+pRahPWS4ubPTUiGh0V49yjiCv7HeXwwt2iXhuokDk/BOQKqh8bF1yzRbwkECbQaWDO2RcpuKl6QSa+9sVrbGdOVqzYhLQ+1tGQl/bygtoiQyoP5xmwWIxx6ZaP8uL7HMrqIIRiYDqPdmMcEcC1exN1WnGVSD1tIPEUYfw723X567bX9vG0ILhi6j8UAZyTdbnAqueZkMnvB5JjaMzSAJ0fItNHJkKWP8iCYyoxiQmpF58rV5+NMu1mklQWAwlDvWBxZVNPF+HfiEBEt0cUPqTBMm+QbWvTwSVTxVb8bycmseap9vHJF+ubAZ2TeV5qD+M9dOuL4CrmxzPk3yMhtBBVanS8okATswGljgZRKecAOb0ZaCETOk+SROATark/Xu+4jYhNalwPvQL30+RiGPqeUlUaSUfZVQ9ANR8FMiN8xifLjr2O1B/CEfHAMBiQtDacUgauReZklAEZ3ggPrp9AtHyCfz7sKMgjUqlkcuAao9ABEAuFUNvKkkEVQnxxuCaChiykmAWSSDxCYWX7G++fMuO83yfSQeYPPhGCRAgoFxRLkSOs+zTarXit99+w7Zt21C/fn3IZK7S+2+/LSUhwMO+x4wZg6ZNm6J58+aYNm0atFrt7RSr0aNHIyoqCp999hm8vLxQt25dl8f7+/sDQInbBZQ/dCYzjBYrpm64iP51AvBazUyErp3IKO9Nb7p/0Oa3qExxltoaNMDOqYAymAv2idnuH9vyGaZTtX2BJomZl7nQ23FuGTBmPUmXHp+zWMtLpCJl0ExeAF7dzOhTO+IySDA9+i2LSLv0OrQmJd43djntfzlweSMwfjM7gWnngAM/0q+jQgs+h9UMtHmes/uHp9OE2Y78W+xmdvu09MhygKaiE7fTMPLaDpI0duKmend2Td3BpANSTpLIESDgDpGQrUWzymUYi9Frii5+/O/qYUFKEaQiICHPCtwtH68M5AWWYHgsQEDZ4OGi+jbyU6nOGDyLI00BMVRJ7PiYqo6l4zhy03QcVaZiKddDmZprlTqUSthGjwND5nBdlMi41h75jSEBxSESUyFr0gE7P3bcXrsfDYrdwWYDDv5CdcbpBVTZ2D2c8m+5+vmE1iFJkHjQ1UPnymbg6laSOXoNX2e3j2mqHFCZvjMPC5JPMNxBLGFtVDwRND8d6D8dSDvLhpId2kyOYnf7lPVTQTp/dnzMCPDq3V2JnOwbrKPavkgV1vmVJF6Kw2wADv5Mku+4m2CUmPYQSRVAxkWmji4dXSIVytbiGYiubmV6aa0+wK/tHXeqgl1H0IsjL7mkMbbVDGXyfmSrBkMVVBeqhAOu9wdU5niaM4njjK3vApENEeQdhMeaRGPuIdfjfa9rBPr63UDw1leBzKtQBFVBfqvXYYntAHXA/R3Tc4F/tOf7JHLWwwIECPjHUS5EzsmTJ11+b9iwIQDg3LlSIgLvEEOHDkVGRgbee+89pKamomHDhti0adNtA+TExESIxULU3sMAo8WGX3ffQKBajjebiRC6+DFKYOVqz0SFUctFxR0i6pK46PQWsOsz1y5Lzd6UdVsMLDqKz78D7DKemA3U7svict+3JFi2f0gVzmOzuV932Pkp0HsaULULULk9i1VnEscOU6Fjf1c28THJJwG/SGDjqw7JuDqExeDFtTRAtqMwi53A4UtY/LozlRRLOXollgLrXwI
aupHAe0p4AEpP9xIgoBgMZgtu5eoRVpZ5fm2Rn9VdeuSIRSKEqsoYQS6W8ELRLusXIEDA3UEd7LjYLw6JjGv0mcX8Ca3FyHF1EOPCj//BdfDgT/yxY+J24PI6jt70/ZFNk+N/8scZQ+fTMPfEn64X7SY9jZalXq7rm8K35PrnjMJMbpNxicc9+A8SAs5kVWgtjivDBhybWXIfNiuw4TUSEz7hQExHIGEfbEFVIfJ5iExiYzvy9T36LbD+Zdf6omIrqoxFIiqU3WHXVDbarm5x3HZjJ9B4lHtfnDNLqJC+sdPzMSUepA/O6fmuvkh+FaiQTjzIlNJ+PwOjVvP4k08APuFIa/gsrkliUTdMCbXYCOnlYqP32ddJCl7f7v65w+u59XqSF6ZCJJYgq+bjUJ3+w/XvLbCK6+h+caRfBEx6yNViPNmhCvZczURCFk23+9QJwiDxbvit/8CxfeZV+KydCHPHd5HTaBIC/O5urbxj+FVktLy7BkfDkfS/EiBAwD+OciFydu4s5Uv5PmDKlCluR6kAYNeuXaU+9s8//7z/BySgTLBYrEjPN+Dl9uEIOfIxiyGpF6W8pUEZyJ+6AylrNunoOWOxkjyRq4ERS4BbZ2hyHNWEyRp5yUDF1pRUe8LF1Uy1UAaQaKkzgNGXeSlc0D0RIIVZJF/kai54Z5d6fo7r21m8HPqFZokTtgK/tHAlnrQZwOpnKPu+tp1dSoDdwoJ0FrfNJwGH/ldy/41HA9d3sds54Fd2iEQSdiuPziRZ5FfB84VsVBPPxy5AQDEkZetgAxDmVwYix25MXgbvpTITOQA/q7kCkSNAQJmgDgWaPQEcdrf+jGEstx2xHUmUABxx6vU1fVOcvUU6vcXmzZpn+buXP9DhTWDP5451USQimeIXBVj0wMilbNxk3+DYk3fR+gsRfVtuKy1s9GG6dcr9a4luxjGxofOpEhGJqI41FnKNDKkBiOUkIbKzPY7hICeO32M2K8mhwBhAIoXFaoNEXD5Jj/eMyEZsSimDgCf3A4lFHjnRzTn+ZNIBumx60biDUev6vtoRv4+12vUdrrfbLCTPSktEUgez7hm2kO+3JpkEi8KHRJBUQaJu4XDWfQofmLtNxZaCGHyyOQX5+kSsebIpvM9NL5lyGr+PKaVHZ5RM0hOJgWYTgJVPlTgkfWRL5BWasOSiFU/0W4iQrVOoQgMAixG2oGrw+I6rg9lkAxAVoMKiSS1xNCEH606n4I223vBb4J4kk+77ErlRvXAtx4K6UX5QyiSez1lZ4BtBo/JFw+ldZkfNPkCH1wVFjgABDwjK3SNn/Pjx+P777+Hj4+Nyu1arxbPPPotZs9zIJQX8J6CQSaCQitE5RgXxqaKZ9z7fk8TwDnWfPqUKYsdv1ErOLZ9+jQt6o1FciBoMBxYXecAE12CRcOBHdnLGriNRVJrzvlQBVGxBlYxIxDnvre9xYRv8Z+kvqDCTHjktny6djJLIWcAAlCFf38ljzbjkup3VQrPHmo9yLKt2f0fn6PxKYNJ2Ht+R30jOqAKBZhNZcJkNgA2MEbcbIldsCQz8jYaPbV+kWqc4opqxKBYg4A6RkFUUPV4Wj5wis8qyFImhajFu5N4LkZP419sJECCgJBRqBgT4RlK5qsvh2txsIj9bG17hdnI1jW+lRSpaL1+g4XAqV5NPcKwyuhnX8MUjHfvPS+Qa+cRujvpaLfS1K0hnStDEbcC2D6noazyW66A2gxf8VgvQ9iXH+qZJYVPjxOySBIRczREvqRew5jmHN11uApB5jWPPuQl8fd6hTEEqDSIRv8tsNsCgBUw3oFdXgFrxkIxXGQtJYsEGzOnD90XuDRz4iSrf7lNZI5QGkRtFvKc487qD6YHU/AmOtrtD47EkPuL30p+oQnPg6jYqux77A1hclIhrswJnFgEiMaTHfsdR9dtIydPjrV61ILMWQnJ5A9D1/ZL73/EJMHgm/7UrafwqwNbzS4jsJtZOsIbUhjWwKtYcv4VCoxhjdynwVqelqBdgglIqhl4eALnIDMX2D9w2/qytn4deEQT7ihfhr0RffyV61g2HKOWE57FFswEKYxaGzorDqmfaoH60v/vt7gXBVame06azNvUO4+f5r5qrAgQIKDeUO5Eze/ZsfP755yWIHJ1Ohzlz5ghEzn8U2QUGFBrNeK93LUQESrlgyFQs1A7+THf/VU+6LoRiKeWzJh0wt79DpWIsAPZ8CdzYwWjtiq0ot3UmRnwiAN+KQPUeXKScx5WcUWcgsP0T4JGPgCpdgbjdQNp53udfgZ1Fd2NfAUUqHlUQpb2j1zBJyx1q96Pax4708+wgFidyABoXV+0KtHqGqVarirpD3sE0I4zpAFRoyWMyaqneUYdwtj/jomuqVeIhmjj2+ppJG/1+JhmWm8BCtuHjQPuXBQmtgLtCfFYhFFIxAtQeRh5LQ0E61XVlSLsKU4mwJ8kKm81292lZ3qGly98FCBBQOrxD6DtXdyAbB1YLyRL72E2lNkxw8i/mtyb3BgK9qc6xIy/Z1bzfoOG+9n5FlYjclw0VYwHXN5EECK/PMaZ931BR4+XHURv/Cmy89J7GVMvcBCDlNP169n9PFQbA9LT2rwKnFtA7JekQvxdUIVzn7SNUkY3pmSdX05xXInOvzI1sTJVfcDWup2Y9rHoNZJL7rJz4O2E1AxBRlaPXAA1HsbFl1rPJlXyCqhafcIcCxRnqENYhxVHtERIvzvCNpD9OQSoVWJ3fBXZ+4qp4ajCMCmF9Hkm1fd+xDqzRCxi3kf5Dg2eRPEo4wPc2ph3y1RVh0ykwbWhDXErVIMegQGTTiRArA6nA2T/N8Ry3TgGb34FpyHyYLVZoCvUwyvzg6x8AlUgKWdpZ/n1J5DDWHgxju9fx1a5cSCVi5OlMuJ6hRaFXdbx5IAUbz6XAaktBl+r+mDZoIXxWjXE5H4Zag3DCuyt+mXcSL3atjuph3vD2Isknk4hh8mQbYH97xHJYbcBnGy5i+qim8FPeO0Go0ZmQnm/AvmsZsFqBtlWDEepbA/7hZVjPBQgQ8Lej3IgcjUYDm80Gm82G/Px8eHk5ZPcWiwUbNmxAaKhwwfhfRFaBAdfSC5CSW4hIfxVOZFnRpNHTCDCncyQp9QyLq+GLaA6ceZUFVN1BgG80PWLsJI4zjIWAUUfDwvg9HD8y6RgR2mAokHGB5nv9fuZtzvJvgIVhRAOmH4TUAFo8STIJYPFQmMPCdM0zrt4AMiX9bI78Bgz8nWTLzaMkXw7+7PocfhWYnLBohOO24BosUtwhpDrJmoM/chTLXuRoM0n+zO7D11KrD4mwym1ZAGdcBvq7MQ8sSKM/QeOxfM6Ob7CICqnJwkpWhvEYAf9pJGRpEeqrgLgs0eMF6bwAKwNC1SLoLUBGoQ2h6rt8bnUoCV2TXvibFyCgrJBI2MAw6Uh0tH+VqhyA64rqDkcmZUoSOylF/oqH/kfyZOVkV8N/iYy3K3xJuqx9znGfPo9EwND5DAoIr8/gAFUQEFSFozf1HgOaT+b2qae5/x5f8PhHLqVKT+YN+Eex4WI101/Prm
yt0ZPPv+EVV8LByw/o+CZfr9yXj1MGwuITDbn0IfJsVAUA2fFU73afCuz9loma9nGpym2BOoM4sj3/MVeVjUQG9PoS2Pe96z5bPMW/ke5TgZPzWLtV61aUIHUc2PQalVKPfgtM3ktDa7OeDTmZikTI3H4OpYrVzNot6TAwcAYfa7NSSdzjMzax/GtioDkEk+exrhpVV8kxsfUvcExowK8cy9KmA9HNYW02AePXZOJ4Uj4WTmqJPJ0RKzbG43KqF6Y0n4UYXxt81SoYvQIxfMYppGmMmDGmKZ6ZfwJPtI/F3IMJ2HPV4cO0/UouJhl88eWwHQjWJ8Cqy0W2d3VsjLfiy8XxsFht2Hs1Ez+PaIQedSNuj96J1MF8He7Uor6RuKblaNjxhFwUGsz3TOTkaI2YuS8OP+285nL7mFaV8FyXagjy/pti7gUIEFBmlBuR4+/vD5FIBJFIhOrVq5e4XyQS4cMPPyyvwxHwgCAj34D3Vp/DxnOp2PJCO0yedwKP1A5DQKX2aGA+C6lpBTe8vh2I2wVU6cIiTHOTRsO9vnFvjBdej3P2c/twzKhWX6pq5GoWZhlXuECmXwBmdOUsdYPhwOlFnLev2Zvy31VPc3+X1nFcy148iKWAUUNCaex64NxKIPsao8RjO1LZknSYXf7O79LE8ZGPOFt+aS0TLGLas1hdPcVh2idXU3Gz5e2Sr0kkApo/CcwdAOhzXO/T59FUzyccOL+CP85oNpG+Qe6QcIBGlWuKfKbUIcCkHcIFrYAyISGrsGxGxwCJHHVQmR4apuIFUmK+FaHqu40gL2oiaJL5/SJAwD8Je+y1SHrXxt//GHR5TH7c9y2QEw9ENmGkfEDM3a8lqkCg09vA/MH8PfUsGzHDFzIRKquomVOtG9evkBrugwfMBvraBFdjQ8g+XhXdDOj5ObDpLR4vQPKlw2tcZ1NO0ajXTliIpUCX9znO9eejjtsvb+TjRq0mmZATT7Pc6t2oNLq+E0idBTR/EjYvP2ilgXiodA1SJRBYCcgqel/PFfP6i98HrH4KGDCDiphL63ieg2sA9QaTYKs/hGotrwA2mGBjHXXzGOs5iQywWvndu9rJf2b9S3z+ZhOBRiOBzW/ze9pqcj9upA5mVP3G1+hRWARbTAektfkaO+LTUD/aD3qjCaFxKyE+W5RctvsL1jx1+gMVmsFSeyCuaFXYez0eADD3UDyqhnhj1SmmTj2zxjGON7JFRbzeoyayCkzYfD4VBrMVzSoH4scdrkQIABxK0KDTjHxseK4tJs05juTcW7BYXf2V3ll1Do0qBiDSnwSN1C8S5sGzIZ3Tx9WzR6ZCao/f8ckmhmEEqGUQ3wffpSvp+SVIHACYfTABHWqEonNNodkuQMCDhnIjcnbu3AmbzYbOnTtj+fLlCAx0RNPK5XJUqlQJkZGR5XU4Ah4QHE/IwcZzqXi7V01YbUBcphbLjt9E88r1sctSF51r9Ib4wDRubLW4ph90fpeGgjJVSWO69q/RlM4+z3xxjUNx02gUXfeP/0mDxpNzihQ2KUBoDQAidnUCY0i81OnnKCC6vEdJt1kP+FcGlo0FOr3LIsQnkuZ7B39ydOeybwByFUe4jvzK/fb+nseVfBJYNs4hy/aN5Dx6XjJTOja/5Zjh9/IDHv2GBWzf79kBtHsGSeT0ALCYOEq27gWHOZ1IxK5jpbbAcqd4UGcEVweOOcmcq3alEiqqKQkxVRlipAX8ZxGXqUW9qDJefGrTeYFWBoSqWMgmaKxoGn6XD1YXFai5iQKRI+CfRW4icGYpcGEV144WT1GN8CCnHZl09G1b/6LjtvSL9CgZtZJNi7tFdFOqNrZ/xPU2bjfHgwcVpRDF7QGOzABGreA66yldcddnwMhlDAuwj0/ePApc2wGMWAoY87l26nPZyKkZSiWPMyIakCyyGJlWeXUL10izno85vxLo9B7Q6lmO91zdCmx4AWgyHshPAxYOBUavhr60dMgHFacX05g47QLTomDjmNWJP0nGJB8Hcm8AaRep7L22jedlyzsk2cLqcByu1TOA5hZrGpGE/kjadKByOzagtn9U8rnNOqqPxRLuM7xeSVWzHe1foyehzrXJJYrbjSjfn5FmGoludSLRPEgP2eIfXR+rzaBXIACLOgKzExvfvmv35UzUi/J3+5SLjyZhYOMoNI/xQkttIMa2qoxcneM99lFIUTvSFyaLDWeTcwEAaRoDErML3e4vp9CEbK3xNpEDANLIBjBP3gdc2w7bzePID6qLrIgOeH1bNq6lc0xrUrtYhNyjWqbQaMbve+I83j9993U0rRQA33tU/ZgsVqRrDCgwmKCQShDsI4f3w+IZJUDAA4hyI3I6dOgAAIiLi0PFihXv3sNAwL8OGp0JM/bewKvdqqFLrTDk6UyY/ngTFBrNOHQjC22qBiOl6nBEn55XMi7UvxIQWpO+MAN+BUQg0SOWsROmzykZbWnH2aVAy6dYcPT+jkTH1nfpowNQAl65LS/u6g0GdnxEg0SxlONcQ+cByycCuizOSovF7ER5Qt5NJkdpktmtmtOXhUP9ocCwBYBMTSm5MpDFiqSAF5OT91C5AxEL+nMrgN2fszDq+iELIYUfH3d9BzCzKy+Ce37Fot+oJcmVfZ0FsLv0CImcY1R2BZCXHztoC4ayaG37MuXoD0tXWMA/CrPFipRcHbrWKkPnzqRjkV/GvzWFVIQAhQiJZUmuUgcDEDkl2wgQ8A8gJwGY1c3VbyTxEBsBfX98cP3KCtKBTa+XvN1qJoEyfgvDB+4GygCg6QSqY/NTaa5bkE6C4NZpblOpDZB1rWgMuiH9TWr34zoNkAC4vpPntfkkrulmA49FJCWBowygQteoYQrWjmKEQvfP6MtSmAEYTWweSRXAkNnA8kn07zEbgNPzAFM+99X2BRI+fhWoJE47B1FBKmTqh2wdNWmB0wtYI1XpXBQxb6bCqvlTQMVTDI/Q3KIv4fnlrK1WvOzYR9p5EjdXtgCn5rPmMWqBat15/5nF/Pt29kQqjszLHB03FvD90iS73q8O5tpRjMSxQ5lyEK/1fwZ6XSbCZGKg8zvA4V/d+hCKs6+jSki727/7eEmhM1lKbAcAZqsNaRoDLFYbmscEwWyx4kRiLuQSMV7rUQMRfkocic+GQirGM52qYO/VTMgkpV/7lEg0E0sgDYqBzmcsTgT1wxcbL+HMJkckeOeaoejbMPKeFTkmiw1ZBQaP92cVGGGylDFMoAjZWiOWH7+JH7ZfRb7BDLEI6FY7HO/1qe1CXgkQIODOUe5mx+np6fj+++9x5QoVAzVq1MDw4cPRtGnT8j4UAf8wTBYrnulYBTqzFS8uOY3mlQIQ6qvApdR8tK4ajEpBKtzMNiFq9FqIDv5E0kYkZlFRtSuwbxoLph2fOKJEfSKomikticqsZwE3YjE7SgofkjhiCdD5PY5cZVzkheXaZx2Ps5pZdKRfpPIlv6gDaLORiPGULhDegNvO7uN6+5nF/Kk/hEkeW94BLq8HqnSiamj7R+w0ApSGd3mfBFP8PvruqEOA0asds+nVe7LrdeB7k
lQ2G19b88lA9e4sMM8tdzy/3JsePidmk8Cp2hVo9DglzPZZ9+vbgGbjBSJHwB0hJVcPs9WGMN8yjFZp7dHjZVeAhapFSNR4iAMuDRIZlWd5QgS5gH8IZgMvit2Zxl7ZxAvdB5XIyb7hPoUIoMJIl333RA5AwiCgEn8K0oG8Tbxgl3sDtfrRjP/6DuDkXBI4fb6nCmTlk/R+E4mZ8hhaE0g9BxgK2Oy4sJrq29wk7lfhDdQeAMDmShK0e5VqkaVj6LUCUJ3T+R3gxDyg6weOca3AWMA3imTR/MEOxWxUY6Dfz7AZCiAJ9r77c/BPo+tHQH4KjaGd66xOb5FIS9jP706Tlp5GhdlU+dpVwQpfnq+t7wHdP+W/VzayPpF7A62fpTF0YCyQcsL9MQTGUk2Vepa10eY3Xe9XBjren+Ko2BJo/SwCVo50HFNADNDlXaqpnFXeALICG6FuoKPeGdWqEladLEYcOUEuFWPmvnhEB6ggFQOXU/Px44hGmHcoAXudfHJ+23MDT7aPRaS/Er5eUmj05hL7ig5QItBDSIBSLkXDCgH4YXhjHLqRhXy9Ga2qBCHCz+u+eNd4yyXoUD0EJ5Ny3d7ftmoQfLzKfslosVix6mQyPt1w8fZtVhuw6XwqEnMKMXtcM4SUdSRbgID/MMqVyHnttdfw9ddfw9vbG7GxTCjYvXs3pk2bhldeeQVffPEXUY4C/lXwU8oQ5KNAXIYWP/WJhFoTB2XSLogjgmEK74akQgkqilIhmvEoUwke/a4oRSKAcZMdX2fUpLPyJv8WSY7Ra5h85U5urQ4h6WKzUt2yaypvf+Rj+tpse58JF3s8/D2mngFgBUJq8feLa+ivc2e83c0AAQAASURBVMxN4ppfNMmUq5tK3mdHeH1gy7tMyZB6Ma58wdCixIgiZF4Flk8Ahi1kh1bqRRPH6zvZqfXyY4G76Q0g7ZzjcYZ8YO/XgERKeXv9oZzj941kQaoOo2Q88QCQcJAGkGY9O2TNJlA2fegXJnaF1ipbMS7gP4N4e/R4WYgc+2fVy7/Mzx+qKqMiB+AFmKDIEfBPoTDLc+QyANvJuRBVal2mRLe/H391TPdwzNpMrtdiCdBgJJsSNhvP14wuriqM3V8AA38jWZN6hmv8xbVUwLZ7hefuykaqaH2j+Hk/tQCYuBVYOpakS0RDB2kW044KWmfcOk0lzqDfSQwpfLjONh3PcIWkg2wm6TXA2WUcA1v5BDBuEwrNYviXJVXvn4JYyvrgz0dL1llrnmUN0vYVEuD2wIdL64EOr1JBHNGQ53T7Rzw/Oz51kEEAFTa7PuP71PltYO0L9MPxq8DnOLWA71GdgVRF13yUyqDEg67hFJoUjm+VOH4JzYztdY0dOXHAiicYoBG/j6QfAPhE4KZXddzI0CJQLUf1MG80qRiAeYfcmA0DaFUlCCcTc5GnM2LRkUQMbhKNbRdT0alGqAuJY8f0PTfQs14EfhzRGOP/POrikaOQijFtaMNS1061Qgq1QorKwWqP25QVEokYA5tEYdaBOGh0riSTUibB2DaVIZeWPXEtLd+A77e7V11dSNEgJVcvEDkCBJQB5WafP3v2bPz444/44YcfkJWVhVOnTuHUqVPIzs7Gd999hx9++AFz5swpr8MR8A/DaLYgu8AAjc6Eur5aRG4Yi8BlA6A8/D0U29+F9+8tUd1yFaGniuLFfaM4L73lbWDGIyx4C7NYHLjD7i/Y6XGH9q+yKNnzJaAtMsXzCQeU/g5DYGVA6Rd1yceA5KNA1S70MqjUBqgzwLXIDq4O9PmBI1C6PPf7kchoUpx0iL/X7gecXuhK4thhNlCxM2wxMHQui6yzy4B5A5m+JZa6kjjOOPwrx6wWDgMOFSV4+FcEZAp2LVdPoezZrGc0aI1e3HbX55xJnzeABW12HIumh3HWX8DfjoTsQkjEIgSXpUNYkM7C28unzM9/b0ROsPt0EAECHgBYbCJkF3pQvfzTCIzxrIINqFw2nzVDAX1w5g4AptUF/tea0eI2G0d1lk8sOUpj1tMjrvUU19sTD1H9sXAYa4O931Ahe+sMTY8zLlOtcWE1mx1iKX30dn/p/tj0uSRo9HlscPT/HyCWcwQ6/SKw6U3g6AygVm+mYuo1QOJh2GyAwXxv4ynlCi9/hkl4GlM/9DPrnJPzHLeJREBhLlCxDf8//zESYz4RriSOy37+R8/BHp9zBGvj68CFNfT+G7OOqpmWTwNZN0iadXwDmLCVNV7blxzR46G1Xfdb9RESS+5STa1m1jx1BvDXCq2Q0HcpnlufjgC1HJ/0r4MedSPw9PwT+GVkY1QOUrk8vE6kLya3j8WcA/FoWjkQB29kISVPj/6NojDnYELJ5yvCvMMJaF45AJtfaIcJbSujXbVgPNu5Kja/0B4Nov09Pq48UCFAhRVPtUa7asG3b2sRE4gVT7dGhQBVKY/8axQazcjTea4bL6fme7xPgAABnlFuipyff/4ZU6dOxZQprgusTCbDc889B7PZjJ9++gmjR48ur0MS8A8hNU+PWfvjMLJFRRgMBoTHzYQk9ZTrRjYbJMnHgNST7MAp/am0sePWaXbQOr7BGfriHjW3TgM9v2CqxbWtLP68/On3YtYDq59ml8cnjJJq7zDg/CrnA2Bhak+TKg51KIuPrh8wznTNFJJKI5ayu6T0Z/JF/D6qW6p0pNlxcSgD2C20IzC2ZOKUM5JPAmH1imTsNqDV08CKSbwALU7iiKUOQkiXQz+csDqMY/eLdmzn5QdUaEE1EkAD6EXDXSPVARa6Oz7m65X7ULHjF+X5WAX855CQqUWoj6LknP+doCCNnwdR2fsLISoxMnRm6Ew2KGV3G0Ee4lnaL0DA3w1VEMeGj/zm9u6MqoOxcH88pnSuek+d8b8F3qGMi179jOvtEhlJDp+7dR8HR6QWPOb4XZcD7PwUSDrCdTf9gvvHFWZTseq8/gFUog5bACwb7wgRuLiGaUVZRUk9Jh3X9UEzGW5wdKbn40s9A7SaArR7rWgczsbjta+bhnwqUWr0YhpW/F5Iqz4GmeQhih836x11gTvcOgNYDBxfshtJ13yUtU5kI0fDxzuU598TKjSjmbV9TA3g+V0zhYqajMtseMV2IsmTehYIrgq0fZEEWvolxpYP/B04+jvNpm1WkmwJ+0s5/tPQDJiH9BqTsPumDT8vSoXOaIG/UobXl59B31q+WDg0ChHybKyc2AC5BXpYTTqYpGocTdHj+UWnIJeK0bhiAH7acQ3xmVo0rRRQKmGRVWCERCxG1VAfvNWrNgxmC7ykkvuSOnWvEIlEqBrqg59HNEauzgSbzQZ/pQx+qnvPWlNIJZBJRDBZ3I8+R/gLahwBAsqCcltRzp8/j379+nm8v3///jh//nx5HY6AfwgZ+QZMWXACv++l90u4RAP12bnuNy7MhM03mjPRe79xv83+74GGw0ve7hcN3NhN0+Ch82i22P9/JFa2vsdi7+gMJld5hwFB1Vw9bi6tdxgmFodMyf1nXWNRGFwLGLuBhJCkKLFi8zvcTuFDgqUg0/3+xFJ2LJ1eM3yj
S253+3VFMXFj42s0GAyuBgxfzLEpZQCVS49+w5jWAb8CI5ey2JSpWNSMWgVE1KfywQ5VIIvw1s8ySj3/lntFEMDCt3JbdkbnDaI6R4CAIiRkFyLUp4zz+vlp9zRWBQBhRclVNwvKYngcwr9n60PUMRfw74FUwe9qn5Ljq4aqvXBYE4Q/9scjS/sAqnJkSqBWX2DSLiocIhoCTScCTx1g+uHdIj+Va5w7XNvqGIXxBKOWjYvix7jrC6o+AHrCDZ3HtbFiK+CxP6msvb6dqVURDUpvVPhFk3ReMBhQ+dO3pXjzA6C3X3g9jijBVjaS+5+CxVh6iqBfNAl4SdF3flRjfodnXQPqDqZ/EUDlUmmpa43HcKTdHfZPAxoMZWjD3q9Y/5j19DxaNoGqLamMRN/SMfS/GTqXZtNVH+GYlqeX51sBM07r0HV2Cj7efgu5hUZ8O6Q+vCVmbB8djle036DCnJaQ/tgQ/punoLLlBqrsfQmxB15Hl4BMPNkmEt8NbYiP1vLapXKQGiG+XmhdNdjjc/aqGw65lJdeErEIKrn0gSBxnOGrlKFioAqVgtT3hcQBgGAfOQY0cv95ClDJUCXk/vlH2Ww26E0WWK3uSSMBAv5NKDdFjkQigdHouQAxmUyQSB6wLpOA+46sAgMmtY/F0xIRDGYrZGKw6HKHc8th6/UNRDIvz9uYdO6Lp6bjGKmdcYmy3xFLqDIpjpNzgAbD2N2RyoH4vbz9wipgyByqUG4ec2wvU1Eqvf97/q4KBGr2oMw7J55kTXB1jlPt+YpKmYnb6XnT/mWqh47/SalybEeOUgEkYDTJjDHt+SWLSXeoPxRY9TT/f/BHoHIbYMnjjKgNrw/0+wnY8KqjwygS0RRy5FLAryIg95AMoA4BCnM4ulali/ttAHbY7NHqGRfpoeMb6Xl7Af8pxGdqyz6/X5B6T0bHgCOCPEljRbWAu1xPvEN54VKQJnhBCfhnEFAJqY+thdeVNfC/sRaQqZBWZwJO2arh1TVFo74P6rWJly8Q1YgNE5MOkKtJTpn0gNnI9fVOYchnU6TxaK4517a5Kldho4muQVPysSIxxySdyR6/CowZTz4GNB3LdCrvUPqk2LdT+ALdPi5S564AUi9ybGfFJPfP0WCEI6nLN5LpTJ6QchKoPxwq+UNW44pEVIkd+MF9ndXmeSD3Jt+H3t9xfG3di1T1+lWgV9D5Ffx7CKjMc243gXaGTOVQSRWH2cAfnyj3RuDHZrDek8hYJx6ezh+AxzN4pmvIg/Ou27wEv7RAdKllQ5UQb4xtHoFwaxpEBg1Eswe7jJSJLq2nSmzQTMjnD0LklbV4fMgyPL4lF/FZhQj39UKlYBX8lDI836UatpxPLTFGF+nnnuTJ0RqhM1kgEYkQXFZF6wMOpUyKlx6pjqTsQhy8kX379iC1HHPGN0eE370rckxmK5JzdVhxIhmnb+aiZrgPhjSrgGh/JRSyh+yzJ0DAHaLcFDmNGzfG/PnzPd4/d+5cNG7cuLwOR8A/gDydEQeuZ+KFRafw1LwTKNCbYJIomVjgDvmpKFRFwfZXXXqxEx8pEgFNxrHoyLhElcqIJcCer90/1mxgR0ebxc5TQAxvt5hIztQdzK5dj89J4EzY6khlePQbYNRKACKg9fMsIk7MYfrU3m9I4tR4lKqcVs+wCAmuzgSNWn1J+hRmA2eXAH1/IJmjzWTR1+F119clkTG16sYuR/FqtbC4lcg5Q273urGTOABJlwurgMtFBo/uYDFRnXRqHpB5jZ1ITwiu5poOcWaxZ/WOgP8UbDYbkrILEVZWw8L8otGqe4C/lwgyMZCYX4arXXUI/y0ebSvg4YSpkOR6+kV+Fz8kvl7XjQF48norTK/4Nb4N+QiP7QrE5JVJMFls6FIrDL7Kcg8bvTvIlGxwFGbRw23xCGDZOCpkCzL++vEAFaNNx/GzaMgHun3CH7uSND8NaPey+8c2GQdc2uD43S8a6DMN2P0Vf08+TgXrlndcyR6Dhv46DYZxNKswja+l5dOu3ncyJUevzq2g905kIypSShsJ9YkEpF7IKngA1VSlQSQFNKlUt8icmkAiEdMwrRYgvDbHs42FrDcG/kqvoPxkBiVENgJaPgMkn2JYQ/Exu0qt+fjSIJFT9eTuu9lm4+e7/asl7zProJUFw9j7R1eFlliCvLbv4o/r3lh/JhXPd6mGN3vWRKTuCsTb34fo7BL3vkC6HJKKVToDVgt8Nr+Ep5v6oEqIGvMntkCEH89RpUAVVj/T5rbXjEwiwuAm0Vg8uZVLzLbWYMbRuGyM/eMI2nyxA71+2Ivf99xAer4bT59/AcL9lPhpRGNsfL4dfhreCIufaIm1z7ZF7Ujf+2IAfvpmLrpP24MfdlzF7isZ+HXPDXT/bg+OxGfDfI/R6QIEPKgot4rglVdeQf/+/WEwGPDyyy8jLIwyy9TUVHzzzTeYNm0aVq5cWV6HI+AfwNW0Aqw+lYIQbzme7lQVqXl6VA0NQl7HT+C3sDdnmp1gCa2HE1kytIv2YsfL3RiPdxgLtf6/sOsTXp+dwMRDDhWKTMWUAk9IPQuE1+Wc99B5NMCzExS3TnEk6foObleQCkQ3A5pOYGrVrs/pzzN6LTBkLgmcW6fYeWoyjj48C4bSM6fD65QYH/+TBc2ySUD/n2g+fHkT4zzlanasIupTvZN9gwWi0p9qgcBYysDPr6RkW6bi7W1f4giXp4vQYzOBFpNpcFwc+anA4f/x/8YCnmdnzxxntHvFdczNy5/GkwL+80jPN0BvtiLMtwyjVcYC/tyjIkcsEpXd8Nge7ZybCESXYRxEwIMDzS36qZxZRAJH7g20eY7fyQ9qhHcRqoV5w2wFPt/lmriolEnwSrcaUCtk/9CR3QXykjl6m+GIGsaldVR3dP8M8A4p/bELh9EXxfmxtfsBXT6gT5ukaJSmzw803M24TAVIm+dhq9oVooxLXLe9Q9lgWfcSkFtkQFurHwmG4ohqDDQeSxJn0k6urafmAfWH8cI9/xbrDYUPsG8akHqaipQKzVkvVO3CseriEIlJVmjTkayxIUCtgP99Glf525GXyPfqxm7gid1sjulyWUfkp/B8LBntqM1GrwYWPU71U3hdplkd/xMYPwL4vRMwYjH9jSQKjpH7RQMZV/j++Vd0bzavDmaTbfFIx20SGd9vkw7IvwWrSAJrTBdIKrQE9k+DKDcRxtD6yGz4DKbuNUKEqvhw3H4g8yqMJhMKfKtg5kktFp7iZ2zqhouYOzQWsjXPArX6AFdKSRlN2E/V8rXtQE4cOlaUol7NFrDZbLiWXgBfLylCfb1QM8IXP49oDI3eBJFIhEC1DEqZ6yXX8YQcjJ515PbvWVojPt90CUfjs/HVY/URqL73WPEHDUHeCgR5K1Arwve+7jctT4/nFp4soYIyW214duFJbHiunQuJJkDAvwXlRuT07t0b3333HV555RV888038PMjA5+XlwepVIqvv/4avXv3Lq/DEVDOuJldiKScQnSoEYLudcKhNZhhtlgx/3ACnm1XBwUj18F7xzs0G5WpoG0wBsb
GE1E1Jw0wq4CeXzF+2zl9QKqgofGqp1kwSGT0t+j2EbtDIhkJj4otgSZj6Y3jDrV6A+tfBvT5JEoKs2hanHmZiQ3zHysyFy5C0hEqcE4tcKRcze0HdHyTyhqxjKRU9g0mSmmSgRywOB22EGj9Ags/pQ+VNAA7x6ufcfjq6HJIJD11iP9eXs/kqcIsXpQ0GgkMmkWZvcXEgrU0o1aTzrN02WJwvW/7hzQNjGvABC1DPmf827zA48284ti2ydgHNA5XQHkjIYvd7XuKHi9Luk0xhChFSCoLkSNT8+JNiCB/uFGYzWjka04X1cYCYOdUjvh0eM1zwtIDgFAfL/w8sjHWnE7B7APxKDCY0blGKJ7pVBWViiXnPJCwWLg2OpM4dpxdWkSmeSByLBaOQjuTOHZcWA3U6gvD4LlQXFoHnJxLhWvj0byoL8wErm6DtVoPSOL2AueWcu1y9r7zjeJ3TPZ11323eZ737f6cpIRUAdQfDrR9ng2SJaNpRi2WAw2GA+2KGicX1nDMWuFLj53UsyXHf3p9DYikEGkzEa4MQm6h6eEhcjQpJMT0ucCfvek9YzaScLm2lUpfZ0Xute1AWG02gRS+QOUOQJd3SI75V6D6+eYxqqIU3vysWs1U+nb/DFgx0fX9ksiBgTNYS8m9qchq9zIQ1YRKO7k34F8RBv+qeGVrDp7p1BBHoj+CV7QRF7KsWDY/E4VGjoTtjS/A1AGN8M7a88jWuhJGmQVGiPQ5JKqqdC7dq00ZwO+TIojFYryz6hx2Xk6HzQZUCFTio7510SwmEL5KGXyV7onX9Hw93lvtPmV0+6V0pOYZ/pVETqHBjPR8Aw5cz0S+3ow2VYMR4eeFoLIkXTohq9CIlDz3SqbcQhMy8g0CkSPgX4ly1eg+++yzGDBgAJYuXYqrV68CAKpXr45BgwahQgXPhmQCHm7EZWox4vdDuJWnx6f96+K1ZWfwXJdqiPTzQo+6Eejyw2HMGNsMl6p+g/qtZTBYAD+VAjFbX0dA3Dag1zfslg1fzG5IxkUmGFRuC+z7zjWtSaYCvAIAVQgwr7/j9qHzgQotHTHfdjQYziJBl0NCQpfN2yxG3n5mSckXpA7mz5HpjtssJiZU7PmKxzmnr/uTsfVdKnd+78gEqeJjSSado5ARiSkbvrQe2DXVsY2xgKSONtNhJGkqBPwreX4TFD4lzR/tkHqxOLFHuZp0LFyrdeP4WGgdnuMdn7gqm1o+TeJLgAAACVn0sbonIuceR6sAIFglRmJ+GYgckYhpdAKR83CjIN2VxHHGoZ954R9QynflA4AwXy9MaBODfg0iYbUBvkopVPIHfKTKjsIM4MSfnu8/OhOo2MJ1dPj2Y9NLfazt8iasinod7WqMQeTFNWwqbHmHd0pkyOg7Hz/syMAHbUdDcmNnST+WR78FrmwFgmsAWUVkTnQzGkw7myubDTyOjAtAr2+5Dmszed+lNYBCzfXeDvtYVq+vgew4GvL6RgJVOtE7R8JQg/y0Avjch++4ckNITY6L2xVMBWnAykkOn7ziMOtZZ0Q2poqm5xc8H9qLbMatex7oPY0JpM7vTdZ1NsAm7eBY3K1TVDzXHQhABMv1HZB0+5hKu4trXGPhJXJ4DfgV/WvUwrDfDuGnEY0x5o8jJQ4xt9AEm02EbDdm4U0qBcBgsfGC6OLa2yljblHvMar9ACC0NrbGm7DjEl+LXCJGrXBfbLmQijBfBWpHeh4ZKzSYEZ/l2bT7eEI2akfeX9XKP40CvQlrz9zCWyvPurw/XWuF4rOB9RBS1rFsAJa/CCkwCyEGAv6lKPfKIDo6Gi+++GJ5P62AfwjZWiNeWHQSt/L08FVK4auUoWVsEPxVMlhtwMZzqSgwWrD5/C2EePui37zLaFzRH3/UPgl53DbuxGYB1j7POMoajwKtnmJ39cD3JQuKGr3YcVswxPX2FZPocdP2BRrfSb2Aao8wwtS+KFdsBWRfA7TpwOXNTMNqOBI4vcDxPAGVOS/uXEg4IyDGdSRJ4UOfnaAqRZ4BSzlXbjGxe9feQzIHQCJFJAL2f+f+/nPLOc/Pk8R9BsaSgCqOFpPZzbLZSipovCOANi8C25wUSzYr5cVp54Hxm2nMrA4GTi+mqWXDx3kxdB8UFAL+HUjMLkSgWn47keOukJ9Kub28jEbJTghViXAwxQqbzXb3c/fqYI4TCHh4Udr7Z9K5N8h9ACEWixBaFlL0H4TNZoPBaIaXpeTF8m2YdVyH9Bo2IGRqwMunaAdwVb8Wg8iiR4ifGjOv6DF0wEaEpe2Fb9ph2MLqIjW6B97ZmYsdV29BowvGtCFzIbp1iqoRLz8mH0kUwNpnqTi9spHH0WQMmxTukHQEMGn5vWAncrLj2IRxhkRG9c/SMUD1nkC3T4ED04DFo4oM1DNgazEZ+5L1GFbxIRiNs0MiZZy3HTePATEd6NXnDC9/vs5KbRgF3uurItJlFZCbxPGziHpAv+lsCg0uCqKI308lVN2BQNxeYOEQYPAsjpYr/DnSfnQmsoeth1gkRtCtMyRanGExQrR8ArpO3ofh9QOw63I62lcLwe4rJf2YFLKSa5OXTIwBjaKQasxHlfD6rDMN+SRszi513bjBMDa8NClUjvf6CVMX8++ib4NI9GsYic3n03AzR4e1p1OglEtRMVAFiVgErcGMNI0eOy6lI0drRJtqwfi0f118uuHibdWQM/yUD4lq6y6QkqvHmyvOlrh928V0tD17C2NaVS6zV06gWgFfpRQaXUnPRoVU/NB9lwoQcKd4SFo8Ah5WZGuNOH0zDwAQE6SGxWqDQirG0F8P4dshDTC+vgLvNYmAKO86RL4RmPhqLVj0BZBrKgHDF5Gs8K/IxRXgiFH1bq7pSXYofEjUzOxWMp7UVEjDxcn7inxo0jjKZB8pavMCIAKlv5c38rYLK4Hmk4BRq6mcMeuBgFiaBluM9NFoPpmkEMARpQtrHKqCGr04enRiDlOofCM5fiWRMRbz6mbGoTcZBxz/w/V4lQH0o8m+4So1Lo6CdJoHmo303xk8i2Niycd5v0RGP59qPZi4cGUzvQack6YkEqDhCECfAxz82VFIRzTk/uwRrD7hjGwVIMAN4rMKy+aPA9C8VBV4X8b0QlUiaE1ArsGGAK+7JXJCeOEh4OGFKqj0+2UPwXjSQwir1YbztzRYeigdb1TrA9XJme43bPMiDf13fU5FTXANoOPrVGCoAoBa/ZlE5AZZ1YZg+p4EVA/zRqooFN8kNMWHfccg3E8JUa4OMvl5iMXAkPp+rBnSztHYWCxjepYyCBjwG8dy+vzI5oXcx30akh0pJ+nFs3gkaw6rGUg4yAv9uD1Ub3iHsVFj9+zb9AaJIjtOzAaajket6GAEqB4iIqcwx9Uk/NQ8YMCv9CA064Gm44EaPXn+vPwZsd7vZyDxALBysuNxVzaxthgym/9PPAjIvABYAdioztr9GbfNiedzylTA1ndhbvcaftiXhrdbK7kPd7BZITq7GFOqNsf0xCjUivApQeS0iAlEmI8CzWMCcSSOqUmNKvjjvT61IRUDSVolVB2/Rs
Ty/kzvbP8qMGwBkLAfNpka1uo9IMqOg/j4LBS0eBHaWkOx7JoIN3NS0b5aMJpVDsTEOcdul6V7r2Zi9sEELJncCrHBaqw7k4I3VjiUKD/vuo6G0X74flhDTJ57HPakbLVcgkdqh6FJRX9YrA9ZXP1fYNkJz2rXGXvj0KtuRJkJlzAfBT7qWxcvLD5V4r63etVC6D2ObgkQ8KBCIHIE/C3ILDAgKbvQpdOgNZhRKUiFFxafQoBKhj4VjZCuehIi53jvoKqQ9PyCZIQuB2j+hGtaAgBsepPmxukX2TExFgCxndgtyUsuSeI4w5gPNJsIJJ8EqnTlPDfAQsJi4Iw3wCKtTn+qZ0yFnJnPus5jyU8D2r7IMaNNb7CAA3gh+shHQHRLIKwuu0yLhjtiO3MTWQB1eB3o8AYlyNHNWPjVfBQ48jtHu6o9wjGw1U8Dj3xY+okOqko/n3PL6QV0cT0Lq/avsBiSyIBLG/kazi5lJ+3UAiZ5+UawACvM4jE2GQ80m0RFkkwJqIpGyAQIuAMkZGoRWubEqlQaet8HhKjYdU3U2BBwt4fjHcKLEAEPL3wi6ZmS54aQq9KV32sC7hhpGj1uZBRg/7UshPt5oV21YIT5esGrWJxvSp4OI38/BJ3JgvFjJ6Dy5VWOtdGOZhOBrKtc2+zISwKub6NKps4AoPUUNlGcHysSw9JkArL86kIiTsf8I4nYcC4V04Y2hN7EkYkIfyW+fqwBLNoc+B76AqKjxcigls/QD2/zWzwOnwiOXCsDuP57Sl+UenFUavxWNlzC61MxIpbRr+XsUip6mk0kkZN3E+j8Dv9/cjbJH1MhbDYrMvINMFmtkErKLTD23iB3Ij19wosSvCQ0NZar2RxaMNQRVKHwZfPnzBI2i+zj1/Zze3U70OVdYN/39DGs3p1109WtbFiJRPTjs1k53u0TgezqQzGuUAe5Kd81MbM48m/BJ3sxhjd7CzPOlYz9fqFrdbyw+BR61ovA+DYxAGyIClDCarUhIUuHt1adRbVgFb4dsgVBVxbB+/oOmDJvoKDZ80gThWDYzLOYP74jNgZXxrkUEwric9A8horkUa0qYcqCkyV6i4VGC15acgq/jWqK15eXVKKcupmH/dey0LFGKHZcSsfjLSuhXbVgbDp3Cy8vO41WsUHo3ygKFQJUEN8loXMrT4fErEIk5+oQE6xGVIDS4/qcrTUiXaPHicQceCtkaFjRH6E+ihKf8XuBzWZDSq7npmRWgREWTyN7dwCpRIwutUKx9MlW+HbLZVxNL0DlYDVe6FoddSN9hfhxAf9aCESOgPuOdI0eLy85jb3XMvG/xxtDKZNAZ7JAJhXjUqoGr3avgWG1vSDd9LwriQMwOnvzWyQiNrwKHPyJhIhY4iBETIXA0rGU6zYdz+L89AImUwyaUeQtY2VxpvChlN5qITkhEgMzugLD5lMps/GVonEjMdMKBs+ihDr9ArBsPGflAT5X35/5HBkXgMdX0FDTOWmrMBtY8xwwcTvnwFdMdByzM/Z8xUSqym2ANVOoClIHUzlTbyiQfIzmyTYbcOuM5wQp3yjH+Wr3KgunvcNKbgcAyUeBRo+TyEk9Q58hTTLjyv0iWZRmXadiqFo3h9RdgIA7REJ2IWqWNYkiP4VjifcBoSoWvEn5VjQIvcviTR1CZZuhgGacAh4++EYAI5cBc/u7XviF1WMMtfIvoo4F3EZKrg7j/zyKS6kOM3yJWITpIxujXfUQlwu9c8kaaPS8YB+3OgOzBqxD6KU5UF9bC0gVsDSZAEmNHsCv7d0/2fqXme4UUJleKYd/Ay6sAmI7wNJkInB5PaoffgO/VmqArI598eaOXKw7k4I3e9W6vQsfLxmgyeb4TfUebGYk7Kd6pOajNO/v9Baw5V2H31uLp4Da/YFzy0oek9yba/O6F7iWt5gMbPsA2PQ67xdLgfpDgHEbWa/sKYo4l8g5stXnB2Dtc4BfBWSZ5IgN8S6RXPRAQ+7NMTKTDuj5JYMQ7KERcjWJmo5vcNQdYK21aDgwahUfs+19jmfb99V6CsfbvMPoM2S18v+LH+c2nd8DrDbg+g6YWzwNc6Ox8L51FqErx1AhE9WY413uENkIODoTQYZEPFKrCSL9lbiVp0PL2EBIxWKkaXR4vks1nE3WIC6zAG2rhuBWrg7Td1/H+DYx0OjMOJ6kQec/8tG5eh/UjxiALIMIx9bk4eVuYYgKUOJQggY/7aefm0gEPNOpKvxVicjTmUqkJdlxJa0A6fkGj6d4xYmbWDipJTrXDIXeZMHkucdv33foRjZ+3xuHpU+2uquUp2vp+Xh8xhGkahzmv9VCvfHHuGaIDnBVJGbkG/D+mnPYcNahSpOKRZg2rCE61wy9b95cIpEIj9QOw7oz7sm4ZjGBUN/jc/l4ydCsciCmj2oKndECL5n44TEWFyCgjHiIVhQBDwOsVhtWn0rB3mucG151MgVjW1fG/3ZfR6MKAagUqMaCw5cwqWYARNe2ud9JxmWSFHZC5sIaoNEoxlg6I+kIEFAFqNaTapQKLdhda/4kEFyFBYI2k4lOOfFFhEkY0Olt4PB016hQm5WpGBYTx4yO/FbyuRYOZVqFUUeCyeZm4bZZKf9tNoHP6Q7B1ZmItekNx23aTGD3FywAz690jI0d+ZWpDetfct2fKpAXJZve4IXnsRmUpXtC+kUgtEh9JBKxmLq6Gej8NlVIZj3Q/mXAkEeSp0Jzz/sSIKAY8nQm5OlMCC/LaJXVwhHB6Gb35VjUMkAlJZFz9w8uStPRJHNMQMDDidCawKTtQE4CR+WCq3FN8Qn7p4/soYHOaMb32664kDgAYLHa8NT8E9j+cgdUCnJ4WsVlFjj9X4tH/ohHz9oD8EizITBagDa1aiKi8KJL4o8LDBqgIIMK1YDKQNcPYGvzPEy3zkH+Z/fbIz6+2Azfoz/g+wFL8fSeAhhMxT7nxgKgy3scW5apgGbjAV0e18xafYElo1zHlU/8CQyZwzEv+wg3QKKi/y/A/mn83b8SsPYF13RIq5kKV7GMx337JBmpsG3xJFCzD2y1++G9nTl4tVto6Sf9QYNZDzzyMdXKq54CtE7jSkYtiavuUzmGfesUb7eYeD5WjnZVVRkLOE4XEANUaEqfmfB6TB9rNBqoOwg2L38UiLyxwzcEEnUQ6uVpUWnVOK4RJ+Yw2WrpmJLH6RNBFV7WNVjMFjyz9ASC1HI0rRyAmuG+2HEpHV9tvgyZREx1ip8XovyVkEnFuJyaD73Z0XCzWG04cCMXravVQFU/L4QEaGG02PD9sIZ4e6VDVWOzAQuOJOK9PrWhd+Nx4wyTxfNapNGb4a+Wo121YHT+ZneJ+wsMZry18ixmjmmGQPVfkxJpGj3G/XnUhcQBgKvpBXht2Rn8MrLxbXLDZrNh/dkUFxIHcER2b3uxA6qE3r+GRrPKgYj08yqRLiURi/B69xoeE77uFn5KGfzu074ECHjQ8Y8QObm5uVi2bBmuX7+OV199FYGBgThx4gTCwsIQFRX1TxySgPuEjAIDZu5zJBula/R4plMVRPp7ITbEG2P+O
AKTxQaR8S8WJH0uFTRGLXDgB2DyHs6yH/+DBYFcXTSf3Qv4X3Mu9CIR5c8Nh1MN4xxhGtkYGDAdMBdyZn7ru+6f98pGoOk49/dl36DMWh3G4sPjSXATu+qMxqOBXZ+5v8+k4ziUHYXZlKF3/QCAiGROWD2SU2umOJI3pF4kvkqD3Zi4Upuiojnd0QkD2K2s0IKjX7rc+zbqIuDfj8Si9I0yzbcXZvCCSPkX3iZ3CJFIhFCVCDfLROQUXWjlJQlEzsMO3yj+PNgBVQ8ssrVGrDyZ4vY+s9WGI3HZLkRO3Si/EtusPZeJteeAUB8FVtQHx3JKg9hpDZPKAasJ8lUTXX1aAMCsR9iWp/Fih4WutlqaFKZPJTtUDdDnAA1HsXkRv7ek55xJByyfSD8UvYZEjXcYlV37plEBIpYyPtuZxHHG6YVU855e6Hr7iTnA2PWwKYMQciMfUrEI2QUGBD4sfh0WE7DnS6DpRFcSxxmH/kdvwvWn+HtgLBXNxcfq7Nj1OdD3RxpQ5yWRPE89DcwfBJHZAEX9UTAETYBBb0O46JzjvdekAJc3kFzb9QWQm8CaL6Yj0PpZqqbEUqTIKiC3MBEysRgjmlfC8N8PIcLPCzPGNMVPO65hUONoyCQivLXyHL56rD4KTRYEqhUQiygGUkjF+H54I/y84xpOJuXePmxfLym+GdIAWuNVnEumYfrWC2mwWW14+9FaUMnFKDSWXHMi/LxKJWCaVQ6AWi7B/ms5sFjdjxadTMxFns54R0ROusaApGz3I0wHrmchW2u8TeSk5xvw62434RggUbX2dApeeKT6Xz7nnSLSX4lFk1vii42Xsel8KixWG+pE+uKjfnXuK2EkQMB/CeVO5Jw5cwZdu3aFn58f4uPjMWnSJAQGBmLFihVITEzEnDlzyvuQBNxHWG025Opoltu0UgDGtqmM4b8dwjOdqmDB4QSYLFyorHKf0ufSVcEOrxu/aMBipunxkDkkcQqzeNE1q5tDGSOW00Bv5WRXEgdgAbbhVaDbJyUjSZ1hs3F22xNy4lmoBFVx7d45I6gaR7pCa7OgKQ7faAcBUxxXNzMpK+Ukf1f4ksxa+STVRv3+Rwm4byS7WRmXuJ3VxAtPu4pJWpQApMvl79FNHSafITX5b3GFE8ARrptH6QMgQMAdIv5eosc1RVLr+5iAFqwUI0lTBiJHFcTPkGB4LOA/DrPVBmMpSoLMAtdxkWqh3ogOUOJmDi8iRSJ2xvUmCya0jcGMvTfwSssAeKsC2aAoDnVICf8iUUGG+20BIC8JrULNsHkVXfxaLVTHJB9nE0jqxeZPu1eBvV9z1Mq+XhaHPg9Y9yLHhy5vZCMp7yZrFFUg/9XlejwXsBjdK3RNhYBBA5F3GIY3r4iMAj1k0ofIbNuQT7+/m0f4hnr5c9zc2YfQTsbY4V+JCmBPyInj4xcMpTq5wTBHwAQA+Zm5aNe/L1L8K8FysxgZdGYxR7XavgAoA6ncOrccWD4B0OXA0OUTXNAo8PnAeghUy/HswpNIydUjJVeP9WduoV/DSET5e2HY7xxVPxafg/bVQrD5fCpGtqiEuYcS0LdhJNacSrlN4rSKDUKjiv4wWWz4fttVvNq9Bsb8cRShPgp82yscTfwKIL+1CYdGR+NQlhJvbU1HZgFrYJEImDqgHkK9FagR7o3Lqa5qNLEIeOfR2vBXyW/Xxp7gLj270GBGptYIo9kCtUKKMB+v2/W3Jzj7VlqtNmSUMvaVlFNKLVxGVAxU48vB9fFGz5qw2mzwVkgR9LAQmwIEPIAodyLnpZdewtixY/Hll1/Cx8fhw9GrVy+MGDGivA9HwH2GGCI0rxyIPVcz8WTHKpiy4AT0JiuUcimOxufc3m5XsgjdGoyA6KQb4q5yO+DWacd4UctnOAoV2YCeB+tfAqp0IbHhXDzZzCSG7DPZxRG3myMTzkWHOxQ3V3aGbxS7Qf1+4vx+cXM2kYgxp7dOA31/AGb3LWm+7B1CcirPjYN/6lkgqgnQ+jl66GgzuU9lIJMygqqQyJHIgKHzHPGY2iz6evT8kgSOTEWzaN8IIO0CiRm7Yqdia+DcCs+v8cxikkkCBNwhErML4eMlhbeiDEtK/i124pWljAbeJUJUIlzKLgORI5bwYtLdZ1OAgP8QVHIpqoR443qG+1Go5jGuCrpwPyXmT2yB15efRpsoOfpUlUGZnwCVjz8KlFZMWpWFUUk5WNzvN8gXD3X1jxNLaXbsE+H6JNbSL0olmiSg4BZQpRPJmMRDjLa2WdgAiWrCte78CiY7BtdgcqM7yL259vtXAJJucS31i2LjxzeKozueSCiRmK+hxAHKAYUfYDUhUpSDBInvfRsfKR+I6B9k1jOEQZPC+kIkAQ78yAaZOoRKJjuM2pLx7M7wCXckY9rHsIoh7OKfSG1RD5IKTUo+Pu0cSbfAWNi6T4XoyiZYI5tC1+I5FAbUQLhGCoPZitM3c/HiI9WgkEogEYuQrtEjxFsBL7nktvpm6bEk/DSiMV5cfAqTO1TB+31qo1KgCpPnHUeUvxKf9K+LI3HZ2H0lA0qZBI81rYBAbwXWPNMa1eRZUC5+jEptAL4AugVXR92RczBmZToqBKrwYtfqqBKqRp7OhE/718PiY0lYezoFepMVjSr4453etVEjnNdBDaI9e3dVC/WGX7G0s5RcHb7YdAnrz9yC2WpDiLcCr/WogWaVPTdEFFKxy8iRUi5Bo4r+LrW5MzrU+HtGAdUKKdRlqRUECBBQAuX+STp69Ch+/fXXErdHRUUhNbWUCEgBDwVsIhvGt41BSq4eaRo9utcJR9NKAagf7Ydgb8Vt07fnV8fh8NMvwlcsgejUfC7sIjFQ41Gg0Qhg2QSqSlo+Q++MKp2B1HNA8hEqc2TKkrP2IknpXTOA3aTk454NhCu1dRQZxeEdxiIm8zL30f9XYMMrHFMCqJ7p+j69dk4v4nOMXg1c2kCz4YDKjBq/to3pUNveL/kcMiULQu9wjj3ZZcVSBefD086R6Irfy+JJIufx6nNJEElk7E5pnCTx1R4BYjqQdBq5jKaAZ5eUfG6xlDHsUU2obhIg4A6RkKUte/S4JoVEpfNYxT0iRCXC9gQrrDYbxHcbaa4WiBwBAkJ8FPigb22MmnmkxH2NK/qjYmDJhkelIDXmDo2FZMeHEC9ccPt2H2UAZvaZi6d3Ah+eCcCHkw9AeuJPIO0smwxNxnFdL/4d4B1OZY3Z1VMDANdbuTcTr0JrM2Gp0ePA2ue5HgJsgtQfyvCB9S8BI1cAh35xrwRu9Qyw6mmgzbP00Vs52bUpFFYXeGw205R0xS58a/YGru8ouc+6gwBVAESF2fDNiUelyLYP1wWsTygbQdd30EvI3rjy8qdH35HfgZh2DJsAeI66vMvkOLmapE5xNJvE9w0AqncDEvaV2ERs0ABWM6xKX47Cx+0puZ/O70GkSYGt3SswhdRFnM4bg39g41AmEaF/oyjUivDFlAUnYDBbERusxheD6iPC1wuP1A7D5vNp0OjNeHf1OXwxuD7Op2iQkKVHowr+AIDPBtbDq8tOI03j
UKwcS8hBpxoh+KJHJJQrxtwmcW4j8woitz2D1eMXwuoVBG8vKdLz9Rj3x1FczyhA9zrh+GpwA0jEIlxNL0BmgeG2YXiwtwJPdojF9GKjTlKxCJ8NrAd/pQzJuTpoDWYopGI8Nf8ELqQ4CLSMAgNeXXYGXz9WH5PbxeDXvXEojnFtKiPEx7FO+6vkeLNnLQyafqBETzLc1wtNK92/5ooAAQL+HpR7BqJCoYBGoylx+5UrVxAS8hdKCQEPPBQSCRKytPh1dBOE+CgwulVlbL+UjleWncFjTR1mgHqTFc1/voQtkU/COGkvbBO20gen45uAdyQw4Fd21rQZlO7G7QGO/soUp7YvMymhVl/HE4fUBLp/ytEITxBLOHe/cyoLu6hi3Z6opkCnN6nyqdnb9T7/SsDA3xzFnE8kEFoLmLAFGLseGLeJSVbnV5HEAUgUzerG+e+mE4BHPmEBs/sLEjZNJ7j62qiDue/8FGDLW66+AGYDC1G5mtHkAMkdm4UETMunSL4sGe1K4gA0dT45l9uH1gEKM4E6A123qTMQmLyXBfPer4F1zwM3djvGXgQIKAXxmYUIKXP0eMp9HasCSOQYrUBmYRniTNUhQG7ifT0eAQIeRjSqGIAFE1ugRhhVAyq5BBPbxeCXkY3df96tFsjOLoL49ALX23U5CFs5BJ929sf842mIQxTQ7RMYH1uAW83fxrRTNry59iquJ96EKf0KFa058SQMun5Q8nkUvsCYNRzTST4BrH4KyE8FVj7hIHEAEg+nF1FVE9UEOPSzw5/FDomMaUlZV4Hs60BhDsmg4sretHPA9o9I8jgjtjOPMb4YIVG9Oz388tMAuRoiQz5Uplw8VJAoqGI+/oer+lifS6Kry3tA9Z40dR42nwle2XFURw38zVX9LBIDjcfQe8+goTKnWjfX0IkiaKv1xYebEyCWyIEeXwDtXuHfAsAabNBM4NoWYP1LEK18AorfWqPitfkY05j1n8liw6qTydhzJR1v9qoJsQi4kanFmD+OoNBowTuP1r5N2CRkFWLi7GM4dzMPI5pXRIHBgkGNo7D+7C0XEseOnZcz4GPN4d+DOyQfh8qUA28v6e39X0rNh8liw7ozt/DswpN4ev4JfLf1Cj5Ycx6pRca/PkoZJrePxayxTdG4YgCiA5To1zASG55rh8rBKvyxPx49p+3BwF8O4FxynguJ44wvN13GiJaVMLJFRciLYu7Vcgle6FoNE9vGlogUrxnug7njWyAmmH5XIhHQpVYoFk9uiUj/UtTpAgQIeCBQ7q2Bvn374qOPPsKSJVQEiEQiJCYm4vXXX8egQYPK+3AE3GcEqOVoXz0EUxacxPi2MZi6/iLii4xQYwIU2DGxCiz6fBhFCiy6aMBzK+MwY3QTtLLEQXp6HsmGat2B9PPA/u85PrTzU9eZ64T9QMVWQK+v6QsTXg+o+ghN+bQZQNWuVL0UR93BLBrsCpruU1mQ6HJYcMhUwMJh7CK1mgKMXEovHoUvPW+ST1L9supJYORKFodXNwEX1wGwAWPWMw7cGTYb0xx6fslUKWMBULMPDRm7vAeM38yujkwFmHUkqpIOeT7BpxZQYaMMYPpD//+xwLm6lUohT4aEpxeQ7JnRhXPvj/3JMa2s6yTBWj9L0snglFByZSPQ7mUWad4PWdqGgHJFQrYWrWKD/3pDd8hLJlF6HxGqYgGblG9FqPou+xXqEHpCCBDwH4e3QorWVYMxf1IL6IwWSMQiBHvLIZd6MC0uSAMOfO/+PlMhgrNPoEpIJUjEIugswOYrBXhxySn4KWVYNiwalbc/BUlCkfpCIqN6o9UUqnX2fsO1MqQG1/5Vz7BJAlDVc2UTGx7ucGwmm0R2tU7vaVzTbTYAVkDuC/zZk9uG1mLMtjvcPEpl7PCFJCuUgSQkFo8Cmk/i6JY2nftOPEx17LhNjFKv+Shs7hQqDzLyU1mHuYPZwFrl3EpHYpWXH02jjQXAjk+Bbh+zeWXUkqy/vJEq5jHrGFF+fHZJbyG/CtBX7IBXVGZIj/1KJVCtvsColay/cuKAHR+XSAX1PfgFRox4FDOOApPaxaJZ5UCcuZkHqw2YOaYZtl1Mw/zDiVhxMhmT2sZgfNsYeMnESMnVI8hbjkKjBZPnHUekvxKvda+BiXPcx5yLRIC+QINSKQ6n9/m0k2FycdzK00NncowYBqgV6FwzDI0rBsBotsLbSwqZRIw/9sdh6gb6O9UI88GVNA/Jb6CBscVqw7u9a+PJDlWgM1mgkksQ5quATFLyc6tSSNG2WjCWTG6JfL0ZErEIgWo5fLwephFAAQL+uyh3Iuebb77B4MGDERoaCp1Ohw4dOiA1NRWtWrXCp59+Wt6HI+A+I02jw9JjN9llsOE2ifNjvwpokjwXPsd+IpEikeH9Lh/go449YEvYBFHKcaBSa5IwB34EavQAGj5Ozxh3xnmJBzmfPXQ+u2gLhvL2/T8Ag2eS3LiwknP4Ehnl1bEd2UWq0RM4vRio1pWPkymBBsNZGOjzivYzjT8yJQuWoKrAkLnAbx2A9q8Cx2cCF9dShVOlC2DQ0khx1ErgwE+O567VB2j7Igkeow44NQ9oPYWEy5HfedG44VWaFVstQI/PqUDyhNxEIKQW0H8658TTzlFFE1SF8/yeYDbQb8dO1Gx4hfu4tB6o+Siw+U1XEseOvd8A9QYLRI4Aj9CbLEjTGBDuV8bo8fzUkuq4e0SIiuNUN/NtaBJ+lw9Wh1DVZrVQxSdAwH8cwXdqRmoxeTYnBqDIu4ZWVRojUC1HRr4BLy89DZsN+OyRMFTdNsE1HMBi4hhU7f7Atg+A2v2ARqM4FnV9h4PEAThWXNq6mZ/q8OBKOsKfSTuAuQNJ7Aye5SB2pH+hQtAkA8vGMtSgw2skidLPA+tfplqkzgASDQDHwmUq4NgMoPHjsMk8+6A8kLBZ6GHmCZnXAXWRCjqsDtD1Q9ZWLSYDBakMaRBL6INjH4+LaMDv1zn9gLEbGC5xdhG3qfcYbM0mQCZWoln815Ccns3HSKSMOm84gvv0AO+UA/hyUG8cvJGNCbNdiZiJ7WLwdMcqOJesgdZoQaiPAo/PPAx/pRz5BhNCfBTwU8pw6ZYG/qX4GNlsgEnhT0an+DwSQOWRk99baaoWhVQMmaTk6K89VQoAknMK8dMOR4MwT2dyGY9yv08xvGQSVAi8c2PtEB8vhPj89XYCBAh4sFDuRI6fnx+2bt2Kffv24cyZMygoKEDjxo3RtWvXMu/z559/xldffYXU1FQ0aNAAP/74I5o3b+522xUrVmDq1Km4du0aTCYTqlWrhpdffhmjRo0q8/MLIDQ6EzILjFh67CaeaB+LhGySOKOahqOXbh0k+78gwdLyKaBKZ0j9KwGHfqZHjr2TtuNjYMRi4MQ8zs1veMnxBApfzpz7RbOYurSR41C7v3BsY9YDy8azoGg1hRGVYgm7dSsn07Cw41skh9TBQMWWNEiEyEPqRFGMo80KZF4BHvsDuLKFJsPtXiHZc3ULVStp54CUU0Dnd4DmE/m4lJPAH71ooFijF3DkV5I57V8
DKjQDZGoeh3cY1S9iCbuLxVO37AirAxz9nUkP+hxHcZSfxoh1T/DydzWT1WYCC4fymPyii86BB8TtpQeBAAFukFj0OS9TYpU2jRdmqjKqeTxAKRXBR46yRZB7h/KYCtKYDidAgIA7g1RBdV1ugtu79aFNMKFSDPxVcuy4lA6L1QaFVIz6vgXuEx4lMl70Z17hhTwAtH0JuLHLdbvsOKpyPSG4uuu4ZO3+bNrYx7DOLAGaP8F0RxFKuUgXMbXysTlU8u77znUEy2bhD8A1veMbPHa/aNjEUhgUwXio8nmkCiqgPNUjlduSyGk6nmqkgnQqjze+DvT+nqNuJp3D2FodTCXy6mf4HbtrKtD+FaDFEyR+Ts6H6PdO8B06Hzg7z/E8InHRuS19VNZbZkXDMAl+2FGSTJyxNw4/DW8Ek8WK9Hw9aoT7YEqnqth/PQuT2sUiPV+PvEITGlbwh0IqRr+GkZh9wP3fcbbND341B0Bx0U1oRMPHmahahHpRflDKJC7KGzuGNK1QKikDAAUGCzR6h6dTqkaPEB8F1HIJtMaS+xzYKOov9ylAgIB/D/4x17W2bduibdu297yfxYsX46WXXsL06dPRokULTJs2Dd27d8fly5cRGlpSRRAYGIi3334bNWvWhFwux7p16zBu3DiEhoaie/fu93w8/1XkFhqRqtFDIhJhSueqyMjXw18lR5Bajrc7BELy248kGjq/DRz6H3D4VxZptfoCwxYCa6awYDPpgKXjaMp7bZuDXKk/lB25k/No9BtcncoWmdIR1W2H2QBseoNxo21f4piEWA48+i0Nh7OuAYEx9N3p8Tm7dTkJlFQXLxDtqDeE6VMrnneQJxVb0E8GKCr8rJQUb3qj5ONtUdwGoOx22/skVh5fyTGtnBvA8vHcx5C5wPmVJQ0ZJXJ2+xYO5Tno8AbPG8DHqUP4GtwpmJpP4vn2i6b6yK8CybDTCxntXhrcEVwCBBQhPvNeoseL/JzU95fIAYBQpRhJZSFy7L4OeTcFIkeAgLuBTzgN/5eNd3NfBHxjmiA4kF4c+UUXp0FqOaR57i+Y2WApdrEqctN0yb7BtU0d4n68uNM79MBp9QwQ3ZwqE2fD4iubgCZj2ZSRq+mRd3Ftyf3U6suQgSUeGn/1hvCYe31Fo+b1LwNVugLVe8Cm8EWhTQZfD6/0gYQykJ6EK58oeZ86BAivC/yvNVU2A34FDvzAcwkwRfTxFQyGyLzK2sQvmgrk/KJgk5tHmas9o4urobUz+QOw0dTxTSqfopsCN92PPSnCaqDK5rFY1mESDpmr4/m1N2F14n7WnrmFKZ2qwGSxwV8lx7jWldG4UgAmzj4Gg9nxN1U/2g/fDWmIzefSkKpxNdruUD0Eh26ZkFfrFTRWBkJ2Zi5rTqkXbE3GQ9T2BUDhfXv7CD8vzBnfHGP/OOJCvDSvHIgnOsRC4WlM0f4WyMSQiEWwOL2QX3Zew9dDGuDVpWdQYHDUb00rBeD5rtVK+OAIECDg34tyJ3J++OEHt7eLRCJ4eXmhatWqaN++PSRuZjnd4dtvv8WkSZMwbtw4AMD06dOxfv16zJo1C2+8UfKCumPHji6/P//885g9ezb27dsnEDllRG6hEStPJKNulB98vKQQiYCfdl7HzDFN8dXg+jBrk1mMdXmXSUz2+WGzgcqWxIP0kFn8OG8vzOI22XH0tbmxk8qZRU7x9FnXgMsbgH6/ADV6A5fcFF1XNjGhKaY9u0UBlYGDPwGX1lH6G1KTBsJZ1zk61ecHjngkH3fdT0AMUHcgsPtLR7EhV1PVYkfcHhIkCQfcn6QavYDrO11v0+Ww+5cXT98d+3k58huLou0fOrqIATE0VNz/PSXneTdpGigS0eC5+1Sex26fcLTrRtFzyVRAs4ncxqwHurxPVVDmVY5jdXyTqSBRjWka6Q4x7d3fLkAAqMhRSMWlytE9Iu8mCUav+395E6QS3RuRk5sIVHCv7BQgQIAHxHZi02T7R7cVL7bo5hANmA5FUIXbm9ljknN1Jph9PBCmFiObNc7JVTd2ATV7OXxZ7Nj6HjBgOr1ZUorWMmUAla4pJ4DkY0CH1znGLBJROSRVONTAEjkbG1YzzXRlSkaXW80MFKg/hEoeYwEQ3sB1tAvgelq9O42ad39JLxcA8AmFrUYvFEgCIHUON3gYYNbzdXb7hGPWdvLL3pTLvMrI+K4f8jy1e9VB5CQeAvZ8DVRuw7G3+L0lU57UofTVkchciRxpMUVJ3YFcI5KPUwm9dGzJJLOGI6geTjqMkKTD6N1wBLpMeRYZeVpc18rx2d5cZBUYUKA3o1KRsW+e3oxJc1xJHAA4czMP8w4lYM6E5lh/5ha2X0qDSi5Fn/qRkIhFeH/NOVhtwNonX4Ot6lgorDoYxUqER1VCoI/rWiaViFE1VI3Z45vjSloBsrVGVA31xs2cQry5/Ay+GdLwdhMkXaNHttYIg9mKQLUcIT4KBHkr0LNuONadcYy4nb6Zh+m7ruO7oQ0AiJBVYECdSF9E+CvvfAxSgAAB/wqUO5Hz3XffISMjA4WFhQgI4KhHTk4OVCoVvL29kZ6ejtjYWOzcuRMVKlQodV9GoxHHjx/Hm2++efs2sViMrl274uDBg395LDabDTt27MDly5fxxRdfuN3GYDDAYHAY6LlL3PqvI12jR4UgFSRiQCYRYfb+eLzRsyYi/ZWw2mxQir1IyJxe4j6OMu8miZnwevTEAVgsBcTyQiq0NqW47rDpDZoSuyNyRGJGgOcm0T9mVjcWIjV68r4lox3bJh8HZnWnAV/aOeDEHBZ8tfrS52bnZ0y0imxEMqggzVVFkHae3fuwuiXTDIKqAJENHdJwO5QBVCQYta7dwatbeE6GzOXFpEjE59v5KWXaITWpRvIOA8ZuZOzqlU18/qXjgKbjqMCxFBWsZ5fSv0emZEHV/jV2vGRKvm5jIdD3Z2BGZ8comR3NnwCUpSSBlSOEz+KDifgsLcJ8vSC625hvgH/n6hDX9Lb7hBClCGczS0rP/xJyNQ06hQhyjxA+iw85jFqOwRRmcR1QhTBq+n5AFch0omrdSORIvSBSBZVIpgv3U6Bb7TBsuZCG68ZARPlXdJsWZ82Jh6HrZ1BuepFjx80n8d+zy7ge2pF9A9j7HdUwBalUmlpNNNSN38ttaj7KRocmGabH5kHS40uINUlAxbYcqZywlefFagHqDOKPWU+PFmOhY5Rq/GamVJ1ewG1r9AQi6gNz+gL1h7HxlBPHUela/SC6sgnqOv2RbwsB7vNw1d/6WTQbSWY9NoevWZ9HUs1qAVZNBlo8TXPjzW+xdpuwBYjpAMTt5uPj97BptuNj92NRjUcBh6ezkbfqKcftKSfpmZhwgCRaZGPWNn1/5N/syGXAqflMBlWHcFxfkwxseef2LkSnFkBVqw8qrRiHSupg1On2M3YVhCLS3wvhRcTJ2Zu50JtI4iikYrzYNhTdY6SQmzQwy4zwEWvRtXYoZBIRcgpNmLHvBhKKfB+HN6+ANeezMX2342/2lxFB6FW/ZFMiPqsQg6cfRIi3Aj5eUqTk6W
4/7/qztzC2VSVcSS/AU/NOIK5I4SqXiPFUxyoY3boS3upVCzdzCnEqKe/2Pm/m6BDlr0StCN+yrb0CBAj4V6DciZypU6fit99+w4wZM1ClShUAwLVr1zB58mQ88cQTaNOmDYYNG4YXX3wRy5YtK3VfmZmZsFgsCAsLc7k9LCwMly5d8vi4vLw8REVFwWAwQCKR4JdffsEjj7ifr/7ss8/w4YceEgwEAAD0ZisUEjH+t/s6etWNwNu9a0FvsmDy3OPQmSzYNS4K6hZPAguGeN5J3F6SJKlneVEXUJnkxvaPmHzgHCnqDIOGnaDiSVUSGVUtvtFc+M+vpLRXLAMajeYYU3EYC4C5/YAn9wHB1Xgc8ftocNx0Ig2Bdbns8IXWYgERGOvoMq17kaqerGvAxTUsdmr3o6Jl8ciSzxdcHUi7CCjdqBHSL5B0WTHJtfMU3Qxo8zxn0DXJvE3qxTnz0DosHN2lTIzdwNd3YQ27ZHbEtAdaP8eL10m7aC6ZdIivrdUzQERDwPdu3WL/HgifxQcTCVmFCPMt48VJbtJ998exI1QlQkqBDRarDRLxXRa66hCByCkFwmfxIUZBOpsKx2Y6xldCajAhMrj6/XkOiZS+cPDcjAtUK/DJgLpoVSUI3x1KQeyj8xG9aSwVsnbU6AVb7QE4nWxAtdE7EGhIgWj9S1zz+/xAUuXSOgAioPFoJjouGMrGBcBRnqBqrC1STjIowL8i9IPnQ5Z1AeLopsC29WwWnZzDkAI74RDRkH4ua5/lORv4O/dr0LA2ubSWQQdiMc2X7V59h/8HDF8EnFtGZVLqOSBhP8TVe8DyN1Tcf+tnUe4NjF4NbHoTuL7dcXtMe+Cx2VQznZxLNTBAJVLn94DEA8D5Fbxflwv0/QVYO8V1XKpmb5Jn2z9i/eYb5ahpDk8HBv8B6N8HDHlFYREFwLJxfC9r9OLofEwHpnXKvYHNxWLhASaHhdUG/s/eWYdHcXdt+F5JNu4egiS4u7sXKFCKFgr1lrrLW3nfyld3pUUqWKFIS1vcocVdQgghQiDuupuV74+TZbPJbpAWCnTu68rVsjM7M7uR+e05z3mes/sJ/Xk8I+/ZRoLehLryfuDpqmJ0mzD2phTw5c3hNNv/X1x2rjn/dEu9nriP+ILy6ED+TMghwtedpmE+DG8VRmJOKR+tO2l3upMZxQyrdgkms4V5O6XYk1WsJ6vYPlntuz+S6N8khInf7CS/tOL84waTmU82xBPu68aETlHMmtqJ9MJy4jOLCPNxo16gJ+G+l9lAUVBQuGG46oWcl156iaVLl54v4gA0bNiQ999/n1tvvZXTp0/z7rvvXtEocm9vbw4ePEhxcTEbNmzgySefJDo6usbYFcALL7zAk0/aDHcLCwsvqBT6N2E2WzCZLDyz5DDpheUMaRGGTqth+vz9fDwsgnZuaXioKqQ75ebjPAHBzVc8aAA63AWo5aYf1kIKMdVRqWXR1vRmeW6ne6DPC5CwXs5Tv5eMPm3/UFKXGg+xyah13jWVJ1YMJVCUCd8Ntz3mHQYN+8OuL2WRl7pHHh/xkSw2frxNFiDlBfDTNPGxGfstHPsZTq2TcS2zA2WAizu4+0q0qiOOLpXu45+f2R7r85woiazvFUihZ+Mbski943e59qoKn6hu0g3d/qH9YgxkJMxiljjXkGYw9E0oyQMX3TWXVKX8Ll6bJGWX0CbK7/KeXJAinhVXgCAPNUYzZJRaiPC61EJOkFybgkOU38XrFJNRFCq7v7F/PCsOvh8J92yoPf3wbybE2407utdnWKtwsIDh9t9wLcuW+5dPOHgEo/HwJ6yiBHdDOar5d8m4Fcgodp1O8qG+6QgZ8SnLlXvkmhdg8P9JEmPaIbmXDX0bo08U+lvn4lGUgkrnJeNYnkEyrnWsmnFt2kFY9Sz0e1ESJlUauS5XT1lDJG6Vr+qYK01579sq41yHFkC7yYAFl0stKF8EV/R3UYW8l47WDSuflmJaVU+++DXQYjQcXiTJozEDJRyi1ViYtBiyYmXtEtZa3t9fH5Pnxa2S7+XxykJOeYEUh/q9AG7+Yopv5dwBGelqPR7qdpOmm85bmljV11lqrXjwABj1WA7MJ7jbc1CcgSX9GL0Pf0cvtYqS8Y/iuetN1KfW2D1dlbwd3S/34dl3Bh3r+nFz63Dm7kjilRXH7IouVlrWqZlKZrZYKCgzOH2L/TxcOJya7/B4AB+vj6df0xBCfdwI8tbRMvI6Sz5TUFC4olz1Qk5aWhpGY01zVaPRSHq6GKBFRERQVOQgCrkaQUFBaDQaMjIy7B7PyMggLMy5ikCtVtOwYUMA2rZtS2xsLG+99ZbDQo5Op0OnU2ZOa+Pw2YLzhnCRvu58tCGeL0dG0uv4/3BN3CAdne0fi+R4g5POUbObpaM19G0pfJzbC+telljQsjzxeLEWdNx8ZRb+9GbZx1Ai0eJ9/wOl2dKhO7wYhr4rRZXkP22mxAAx/Wp/QRqNJGA0HyWKGzdf2DPLlpxgZdObYuY34GWZ9c44JkUZz2CwABtfk/1WPy8z9wfmyoJFrYFW48SI2WKSufwmN8m2qhxbDneuFsXCjs/l2Kl77Is4Vdn8poyGPbBduppFaVKc8Y6Akkx7xVJVkrbbjJVdveTrGkT5Xbz2MBjNnM0vY0iLy1Bt6QvFONQr+O+/MGwR5GeLzER4XeLolmeIzeNCoQbK7+J1SnE67PjM8baitMqEpatXyAHxR7QZpUeAX02/nCiPCtT7F9iKOFZS98jXmT3SzLGYZOR42m/w84MSdGBl2wfox/yAq4c/qnk3w8T5UqCYMA+W3u344nJOycjy6K9s491dp0OFgxHxqlSUwexBogbp+QT4RKIqL8TPtRi4+Ejoi+GK/i4aSuDUBsfbkrbL66zqKah1A32xjJdbzKKYaTwYynJkFMvVG7Suogqu+r3UeckoOIgyp8dj0nT78TaJnG8zURpfFWVSBBr4P9g7R8IetG5S1Jn0o4xnVfUujOokyaKVeGQdQqcuxLL8IVSVxSkV4N12IsQ5GM8H1Km78DblM+zbWNrX9ePWDnUcFl2CvXQ0D6+prnbRqBnZNpL1sZkOj39TyzBi05x/3kkvLMdQzcNHQUFBwcpVd17r168f999/PwcO2JKGDhw4wPTp0+nfvz8AR44coUGDBhc8lqurKx06dGDDBtuNxmw2s2HDBrp163bR12Q2m+1mjBUujZ2nbYoZH3cXCssMtDLHShEHpLtybr8siKL71jxA90ck/WDk59LdmT1Iihfl+aI0CWkON70nBRCQYs/GNyT5qixPFgTxa+HboVIs6v6omO95hchzqhZxQLp0ztQmPhHS3Wk9Xsz9vhsm3joqlUiJtVWSeUqyZLGy6jk55p5ZspBY/Ry4uIk0G2RxvGiKFEhGfQ4jPpYkiNTdUrwJbgItx8kok1tlt8U3Cm75RgyN49fKwmXo29KJckZ2vMy0+9aB6D6y+AlvIx+UHXkT2b0nxbVvV1BwwNn8MsyWy0ysyj8j//W8MsqvYHcp5FxeBLkyWqVwA1JRLvdjZziLmf6H0
ZRloXIUTw5yv+vxGBz+EZbcIffs5fdDz8fs1xtmI54/34mLsUgaFxWVI8sWs83w2BEFlWrb0mwY9LoECJTmOR9D0/nIesFQDGd2SzGiKB0OzMNdfYGEyGuN2n5WQNZopirvXeuJosppN1U8/k5tlLF5j0DxScxLlJ+x6gW5VuNkXOvO1TD+B/H1W/eybItoK35IIz6WNdSgV0WVfOI3+b6VF4hiavXzoiy20vk+GXmrkv5pDm6GJvf0+SLOeUyGWqPNteW5uGrU7E/Jp1WEL88MaYybi+3jU/NwHxbe15UIP3eHz+9Uz58GlQbLVfFw1TC8VTjNIpyb/Yd463DVXGcm2QoKCleNq67ImT17NrfffjsdOnTAxUVSToxGIwMGDGD27NkAeHl58cEHH1zU8Z588kmmTZtGx44d6dy5Mx9//DElJSXnU6ymTp1KZGQkb731FiDzxB07diQmJga9Xs/KlSuZO3cuX3311RV4tTc25RUmisorCPez/xA3obUfIcfesj2g1ojEdcUjUpDocIfc3LWucvP2CJYIS2tHpkFvkd1aLNLtKS+AY0vF4C5hE6AS9Ut1jOWw7UPo9QRU6OHAfHAUa7rjCzHXW3af/YJCq5POW26SeNNYMZRI9ycrDvq/ZDPU8wwGLNDpXjg4D/Irz+XqJcWg0V/BtzfJYqeiVPx/3P1FBr7rS4lhBxn76nQv1Ospo2I6L+lcBdQXr5yUHZD8hyiVGg5w/g0JalQzqtWKu7/z50ENI0oFhYshKecvRI/np4jKzePKmGm7aVX46iC12PkC3SmewZWKoQJbcVVB4XrHxU1+np19QA9ucnWv52KJ3yj3N0eq0qFvwy/TJTjAytl9kmw0caGkMeorDYCNevGe84mQ9wLkb1DV9KrqBETLePUt30izJKyV/Hv0l/DDaCnYWFFr4Ka3bfd2kHXM6udFMXy9pVa5+dW+XedtK4AENZaghbJ8GVc7u1fMpXd8LoEMzUaIitrqX2Sl0z2ioio4I95NpzfajukRIB5HOm/wCod7N8mIuMFB4yknQVTbnR+Aup2lKVg1YEKtQd1hmkTCV0fjWhlr7/heUaELwGASWwAXjZr7uoRyR2sPik0uFKs88HN3IbCWtKhwP3fm39OF2dsTWbznDOVGEwObhfLU4CZE+nugVqvwcddSWFaz0PfYgEaEXK4HnYKCwg3PVS/khIWFsW7dOk6cOMHJk2IU1qRJE5o0sS0g+vW7wOhLFSZMmEBWVhavvPIK6enptG3bltWrV583QE5JSUGttt08S0pKePDBB0lNTcXd3Z2mTZsyb948JkyY8De9wn8HGYXlfL7xFMsPnGXBvV2Ysz0JEGO3jlG+cKpyUVSnk6hERs+APd9IuoGbr6QQmCsgeQfc/IktCaHpcImRXFIpde77ghRSTvwui7iRnzsfEQKRSre8RY5XeMbxgjX9COyZDbctlojM9EOyWIjuI8Wk359wfOzkP0RW7e4vZsrdHwP3AJFPl+dLkScvGdrcJsfxDoP7t8o1BcaI6eGx5aLEaTYSxnwjBskn18hXZEcYO1sMmd2thRW1jHgdWy6L077PS0fQ0XhVlwfsFUNV8QyWgtHJNTW3Nehji1yuTlmBFLvcfGpGgir860nOLkGrURHo6XrpT85Plp87zWXEll8kwe7qy1PkWFVCBalKIUfhxsErDLo9ApveqLnNO/zvMzv+uzl3EFqNkft21eZLSHPITbQv4lgxVYhKts0EUWxYKcuXe+iZ3WJWHPsrtJ4gSZXVCWokRYYD8+SeHdNP1MXHlkvD5e61EPsbpB2Q965+Lzmn1fzXSu5pCGgg572e0LhK88jReFX93rI2azpc3seIdnDoR0jeLuNurSeBTxwc/1lSNcd+K+bPWSdEKePqKWNT+kJZG3mFi1JapQGLUdTE/V+GlU/ZlMjD3q99/Re/Fno8Dql77b0F3XzFrNozyHERKGkbNL4J4lbW2GSK6sof6aLufKZfJI3N8ah/fgeXjON4BDQgtPcz4NWaC6WRRfi589zQJtzTS6YNfNxc8NTJR7BIP3d+vLcb983dS2qe+De6aFTc0zOaoS3DFENjBQUFp1z1Qo6Vpk2b0rRp07/lWA8//DAPP/yww22bN2+2+/cbb7zBG284WMQoXDT5pQZe+vko645nMLRFGDlFep4b2oR318RJOoyHLyUtJuHZ60mZld5Wqa5qMVrGnn59FE5vgjodxV9m/SuyICrJgdhfJGbSWC5dM796tuKEUS8mes7MgUEWV6jkvGmHpCh0cnXN/ZK2QVsxIETnI4uzHZ/J+JSjRaGVwnOyICjJkkVkSZYoCgrOSORmaCtJsdj2vvy71QRoPlISu6pGpabslCLXsPdtsZtn98qCdfuHcuwej8lCqvV4WZTG/S6pEGO+sU+tcnGX91Wjc+5v4+4n0uRfH5PFjpXofjLuVV2RU5IthaM/PpbX2KAvdH0A/OpLIomCAhKrGurtdj4F5JLIS75i/jhWAt1VnLms0arKQk7+GQht8fdelILCP4VGCx2myZjQnlm2sZPgpuIVczX8cYrSJAWqvED8UDwC5f5UG63HShrjLV+LuqW40hcxskPNoklVzuwS/7mqhLaQ+/XOr8SDb89sGUM2lEiBxlL59yKinSQwzR8r98jchMrkyjyo1wMaDYTl02XUx2KUsaKvutunTFpRqbC4eKBycz5Cc01ybBkMeQv4j30BpUEfuOkdaTp1e0QKN398IrHrqXtF2dKgpy1JylguyZ0NB0q6WI9HpahVVgBad/jzUyjNxRLdD/P926lAi0rriquhCNWA/4qSKWWn/FfnLWsSB1h0vqB1R1WnA9y12hZnr3YRJZV75YhX2iH7J+6ZDWPnYLFYUJ20eRWa6vakfMQXvP3VSe7qFsUDdZLQzJ56XrmjKjgDiVuxDHkLVctbwds+Qbc6rloN4b41x69UKhXNI3xYOr07uSUGyipMBHnpCPJyxcNVWW8pKCg45x/5C5GamsqKFStISUnBYLCflf3www//iUtSuASyiw2sO55BHX93nhnSmJMZxQR4uLJ0ejdUQFxGCU2aDIRFk+xltBlHpWs19VcoTJXxqKX3yMIo+U+5occMlEWSSi0mvXErZbFWvzckbRWDvbGzZdTJEa3Gi+okqpOMJfnVky/r2JMVFw+5sf/8gP3jKrXj9AMrWp0Y7BWlywKxXk9YWyX2MjteOlCjvpAF5smVYoxYtYhjJXWPFGkComVh2uEOmffv9bTIfLd9KElc+hLp5t23RbqJWjcpJukLRO2k0cLRZTJnrqs5h30enwgpApVkV46M+MhYm0e1sauyPDGerppskh0PB+dLwS2slfNzKPyrSMou+QvR40lXLLHKSrCHisOZTn6Xa8PdX8ZBC878/ReloPBP4hUiBYouD8goiouHKBWuRkphZqzEg1e9H7caD4PfqP1DcGgLMJRKk2Pgf6X5YjHLGuHQAufPc/cX/zorjYfKPd46RrXkLuj6oBQHejwu997iDGmIqDVScLrla0n6OrLIdpzsk5IqOe5bURl3eQD2
/QDDPxL33PJ8CVw4V+kFGd0PVdxKjG7+aH3DL/FN+wdx9YQfRkk6Vf+X5b3UeUsTacH4ygjycvn+5CXLe3vLDGlOuXja0jPr94Tmo+V4SdtEudNqHJTmihI6ZScAquyTaA4tQHXHStSnt8Dmt2zmxdH9sHS6F1OFHu3aFxxerqXdZNRzR9mU2Gqt/JxYzDDgv9Ica3EL7P7a3gPNWA5rXyFnzGIK2z+PzlhEhdaLzWctZB0oZ83jPQky56CZOdrh+JVqw/+whLdGpdaC5+WPCof6uF3emLKCgsK/lqteyNmwYQMjR44kOjqaEydO0LJlS5KSkrBYLLRv3/5qX47CZXAuv4yH+jWkaZg3zyw5QmpeKU3CvHk+sikL9qTQt3EwxlNr0VafhQZZAMWthJOrZHbdSsImUZUkbIT938uNOLqv3Hh3fgV9npVRrJQdMo7V9UHY+aX9scNaQ4NeYi5oHa9a9YwUVQ4vlgKLySAqlD7PS4etOgmbRGJ74rea21w8pHtYJOlqtLtdFoLVsZglnWvAK2KUWD3WtCqxv0lBKLqvyL9nD5SFgtYN2t8OZr0sdiI7yvbmo2SsKnWvGAIaiiBmkLyegGjn57Hi7n9hv5yijJrxtCDnXfmM+A5UL/4o/CtJzCmhhYOkjgtSlleZWHUZaVeXQLCHinMlFkxmi6gFLxaVWsa+lEKOwo2IzhN0DaRBcLUoSIXvb66ppjiyWJoM/V4U3zxHeIfBhB/E9HbzO3Lfi+4v98gWtzhv7LSfJgUXd38ptrQYI/f2kZ+JCiM/WUamw1qKQqnlrVKE+e1xyEsStevY7+yLOFb0hbI2ie4DgQ2lELbjc1GyeoWJ8qnDnbDlbej9LCyciLrxULieCjkNB4jp8IJxNbd1ukfWCa0nyJqnyU3w4yRRXne4U9ZJoS2h99Myzr79Q2lE1e8hKZ7GclEWNx9lf1x9Eer1r0JoM/sEqtObUH0/As3kxVhiu6A6s8v+aR3uQ6cvkbH9tINyj6lidCwx8FPALwruXIV510zURxaBSkVps3GcbXgbt/+QdD6BFcRXqU0dX/o2DibINdeWnFodox5VUbp4G/6FQs71QKnBSFp+Ob8cPEdKXin9m4bQsZ6/U6NnBQWFK8tVL+S88MILPP3007z66qt4e3uzdOlSQkJCmDx5MkOHDr3al6NwGYT5Sgf+kYW25LHMIj2TOpeyeE8q93UKwO2Pec4PELcSYvqLvLXNJFkA+NWDVc+KMqfqfqfWi+T7l4ehw1QxDLSYZGHXapyMWpUXyjFMBlh2vxRC/vxMFmsrHpbCTvNRMOpL6bR5h0m6xeD/q3lthxbC+O+l4FRVRaPVyfF2fG77d0WpYxk1yCJG5wOqczaptkMsUlja/KZ9YctYLoUbjQ7qdZcUq453ywdMlRoa+UOTYZUmgKHg/jf6eCRucb4tZYcsdJVCzr8eo8nM2bwy+je9jE6+Ndrbq3Yp+l8lxF2N0QwZpRYivC5x/MszxJaspaCg8NfIOul0JIY9M6Uw4Bfl/Pk+EdDtYSkclOfL2O+CCTIi3f8lSbKsSqMhUK+bjI15h8HG12V0ObId7J8nChOfcFGZnN4iaUud7oL5t9qCF8LbiBLYGQkbJG3SUCRFDKtaIy8R1v9P1ijTfpP/L89HxWUYr/+TaD3F5Pnn++1VynU6il/Q4qkyluYZJB5DJoMU2+7fKqNqdTqJYrnq+Fv8Onm/p1VplgU2lKKbX11RRB1cIM+tTn4yqqx4GP4B5CZiiVsFOi8srSagcfOHU2ukONluiowPrnvFpr5SaUjOLSX5XBYxwUGE9X+Jii4PUGGy8M3eAj79NhGzg29Px7q+RKqyURvLan+v1FqIXQERbS7+/b3OKKswse5YBo8vPnj+R/3nA2cJ83Fj0f1dqRdYiyJcQUHhinDVCzmxsbEsXLhQTq7VUlZWhpeXF6+99hqjRo1i+vTpV/uSFC4RrVrNN1trqllcNCoMJjP5pUbq1pbOYN02eoakGPz2BAx5076IY8VkkOJJi9Gw4TV57NZZsihrcxs0HAQhzURdk3XSlk6R/KeY5o37XswQywukAFSaJeNUZXkyluQZZN/1qSiV6NKbP5Nu3Nl9sgiMaAf758o4FMiC7WISKBI2SUqVs1jXJsNFfVO1iFOVvbPFY2fuaOkG6XykCxnWCn55SMbUgv9mg8rrLVlD4R/hbH4ZRrOFsMuRgucmSpHS88qmpQV72CLII7wu8efaM7jmSKaCgsLl4UgBa8VQAhf6oAziP+cdKmPaBytHqvbOFi+8yT/JfdRYDo0Gy7jOt8PEh27Vs7bf5UaDxddm3UuibAVJypy4EHbPthVx4ML3eZVaitGLpjhOPDryk4xsnVwNYa0wa93RXPhVXjsYy2Tce9KPoqopy5e1UGk2LL1XijsunrLusqqiLGbxO9z0hoyqOfIwMhlg42uSYupXT5TXO76QUXz/+pVFPSdeiGd2wc4vIKIdqg7T4PgvqObfitrdHzreJWNey+6RsfdRX8j4PpDXbDJP/HaW/Sn5eOm0zJrWkfZ1w/DQaujS0A3zpqQap6oX6MHT7Uy4zx8k0fO+dexHsqzofICLXBNex2QVlvPkT4dq/KinF5bz+m/H+XhCO7zcFE8fBYWryVX/q+Pp6XneFyc8PJyEBNvNPTs729nTFK4h0grKqTDVXLQcO1fIzW0iSC51oajVNOcHaD5KTHxPbxLflZDmjos4VhK3yuLBysnVckNd+RTkxMvNf/3/IKSpJGBZE5iS/5DZ9oi2UoBZ/bwoe6xz28eWiWxaV200xNVbijg+UdJt0upE4aPWyGKv5a2yENG4ysw3iKeNq6f8F+T6SnPlGup0cjz2FNFOZscdLQysVJSJuscq6dUXyoInfh10fbj2Dubl0qBPLdv6Xng0S+FfQWK2RI+H+15mIccn/IovfIPcbYWcS8YrWFHkKCj8XYQ0c77N3f/SEp18qpkyH1wgni0nfpO1hJuPFBLMRmnWWIs4Wh3415PGiLWIA7LGiF8L6Yftj5t9Ujz7nNF4mBShck873yflT+j5BAx+A4PGSRjBtYrZAGW5sHiajKef3SepniufkaaXb5SslfJTZT1kxd1PHq8tYSppuzSkUv6EFY/Y/BTzkmDtS3BokYyvV8crRIpnwU1hzhAZsS/PFxXUupfl+9plunjx5J6GyA6Yg1uQEjqQ/Sn5ABTrjUybs5v0AlFUNwv34clBjagaDqVWwawxUbgvu0Magbu+EhV31dcJcg8b8n8yZtZ85CW+wdcX+1PyMTmSLQEbT2SSW2pwuE1BQeHKcdVLp127dmX79u00a9aMYcOG8dRTT3HkyBGWLVtG165dr/blKFwGWideE3N3JPP9XZ15YtFBmo3qildEe1Tn9ktnpfloGQPSF0mnxFAEW9+TJxr14Obn/IQu7tXMh1W27ldFqSh10g7Cd8MlNWHQa5VFFY3Efs8eZG94aKXZSBnNGv2lJGkUnBV1i7rSPLjrdPj2FvtCy66vRD1kMsiM/dC3ZSHnX0+6VR4BkHFcZsO3fyhyYY8
gmQk/uRriVsnxm4+SAk/ChtpTuNQa+arOsWVw7+YrM5riFQq9n4Ot79g/7uYrSRUXShhR+FeQnFOKVq0i0PMyzI5zT9X8MHYF0GlV+OlUnCm6jJEGr1AoyYSKcnBRDCgVFP4SAdHg38A2VlmVnk9K/PTF4hUi49kJG22PWSyiGulwByRuF9+4TdXGp5uPknu+o5HoglRRh2QcET+d6L6i1tW6wZRl4reXU0VV5BEAHe+0V/Q6Qq2VEavl92MZOeviX+O1gForI1MTF4hHYUm2rOPKC0Qp3fsZUb94hkgT7efp4lFTXBkGoS9xfmytm6ifNr/lePueb6RxdmCu/XOCGslo90YHo/Eg4/GTf4JdM+DQj5Tf/AVHi3y4e4l9w0xvNPNnQg51Az3x83Dlzh4NGNkmkl2JOZQaTNQP9MSfVNvPa2asqL8m/Shj/1knJLyjyU1wYB6W4GaorsI97Z+kqLzC6TazRcatFRQUri5XvZDz4YcfUlxcDMCrr75KcXExixYtolGjRkpi1XWA2WwhwNMVT1cNJQb7NJicEgMZheWk5JYyZXEyf97zDaqSTOmwHPhBFCoN+ohcV+NuM6I7d0CKI9udfP9bjZeUKyuNBsPq5+T/A2KkiGMlZef5BAQAbnoPBr0hizBTZbdApZICi6FEOig/PyjFIo8gUQh5BsG4H0T6W10tY7FIt2jiQlj5NIS3lVGszOO2fer1lCJR72fEQHH1s1K0aTkO3APkw+HRpbD+vzImZtRLMccalVmVpiPg1Maaj1ssspjSXmZiUG24+0rUeExf+PNzud6Y/jLKVlvRSeFfRWJ2CWG+lxE9btJL0TSy45W5sGoEe6guT5HjWen9U5AKQQ3/3otSUPi34RMBty+Hdf+tNIRVydhNk2HQdhJoLmHoyCNAxmbW/ReOLZVGj9ZNRnKa3CQK3dYT4I7K1EjvcGnWhDSXwoQjjv8iDZcmQ0VJsmC8rWHkEyneeQcWQsZhKfJEdZGRrWY3y2hWogMvHY2L+Mlkn4SMo2i5jAS9fxBVcaYUtQ4ugKM/VXk/ImD8D+I1lBkLxEKjQZIKNvgNCUw4vQ063+v84K0nyBrMUZMN5HtaliffV2O5FJBGfSkx512ny/fTGbmnpdhXXsD+fG9uW+R4RPZUZjHF5RV4ubngXfllMJl4cN5+TmWVsH1KNe/BxK2iso4ZIIqgVhOw/PERls73ow5rKT+XNzAd6zt/fTHBXni7uVzFq1FQUICrXMgxmUykpqbSunVrQMasZsyYcTUvQeEvklZQzturTvDSiOa8uPyInTmcm4uaOv7uNAr14ujZQsotGtwPLUB1oIrxcfZJ6ZjcudLmTxPeRm68PZ+sWcwJjJEumnUcqtFgmc8uzZV/l+dL57w4w/EFF6eDoUwWYRVlEmHqHSoy6t8eg5AWMHaObCs8KzdnFw/pPp343fExzSYZd5qyVCTHVYs4AMnbxeAvsKF0bkBm9w8ukIXitvdtpo9Hl0FgIxjxMfz+pBS9rNTvJaNdP93p+Drc/kaD4+p4BIjJckRb8RJy9b60hbbCDU9idsnlRaXmJoqPwlXqXga5q0gpvJzRKmshJ0Up5Cgo/B24uEOrW2Xk2WKGXk9B3W62cehLwScCbv4Y+v1HijUqNWz7GLZ9JMrc9MPwx6fibXLzp7BwggQjeAQ6HoXSF0pzKeeUNFqqUngW5o8TNYbHdFj5rE1RvO87ua9nnZC4cisqFYz83Kbq8a+P2s370l/nP4mrF5xcI8liVSk8B/PGSgDE/u/kscOL5X1I3Q8/3yePeYdKatXW9+2fHxAt4Q1l+bWf37eOGBt7BMl7mHZQin/dH77AdXuCUY+p4WD2pDv/2x8d7MWxtEK6NJCkqYzCcsoMZh4d2AhvnQuuHvm2QpIVs0nWjxpXLF0fQnXrLFTWEfsbnFAfN25qEcaqY+l2j6tU8PqoFgR7X4HGooKCQq1c1UKORqNh8ODBxMbG4ufndzVPrfA3EZdeyLb4bO7tHc3C+7py9GwBhgozHjotbaL88HbT8kCfGBbvSUFXUWhfxLFiKIa1L0O/lyTm08VDuhwaF5i0EE6ulQJN/Z4ycrX8PklOGPExYJEumJX4DRJFvv6/Nc+j1siCbt3/ILy1fKm1kB0vyiD3QIjsICNSSdukMFJeKGbCzUfVnjZlLSZVL+Kcv641Ej9qTbkC6Txt/0gety5syvJEofDrY6Lg8QoRU2a1K2hdYPO7jmXgvlFXPPEHkO/NpXgXKPxrOJ1VTJsov0t/Yna8/B5ejZ9fRJGzN/0yOuEegfLh0JFSTkFB4dIoTIOld8u93srpTaLMmzBPPLMuFVdPSSkqyYZ5YyQJEyChijfLkmnwwHYpwsSvhfZTbaEF1fGJgOUzHW8ry5P7fUhLOL3R/vHfn4IRH8no1bkDogCK6SfXZTSAUY9l8JtofcMu/TX+k7j5wG4nzdbyfFHrWg2ADcWABX5/zLbPvm9FlVN1XVevhzSKEjaAbyQEN3EcBuERCMVZENxM/g6veASGviVrwvSjonSq6nNkxcVdPJeMeiy9nqJ5ti9Q04cwyt+d6CB3jiRl0DLcm7MFeu76bg+peTbT7RcHN+COXs/isum1mufp+SQq7zBw/fesjwI8XXltdEs6Rwcwc+tpsosNtKvrxws3NaVx2HVWpFRQuEG46mbHLVu25PTpWozhFK4pyiqMpBWUkZZfRnpBGYnZJdzfJ5o/TmVTZjBxNq+MvSl5ZBfryS81kF+qp6t/MbPbnUYT50TRAhJxHdkBojrLTHpUZ9j1tahPClNFwrvjC1n4BTaW7v3ubyThypoq4eopI0AB0dB0uP3xtToYMxvcAmD0V7D7axmJil8HiyZLyoRfXfjzUzj+s5gJ556WAs32j2QBFtzU+fXX7yXKAmdYLPbpF1ZSdsg41vnj9KxMg8iTD7jhrSXJKiBa1Dq9n5aFUlU8AuC2RZe38FVQ+BswGM2czS+7vMSqnFPyQUdzdfoIwe4qMkosGJ2YNDpF4yKdYMXwWEHhr5Oyw76IY+Xs3tpNcS+G8kJbEccRcaugrAA63ycKoHZTa+7T/dHKUZ9C58fJT4GcuJrR2HmJ8ONkOPSjGO3mJcPCSRKRHhANAdGUBbe6rJf2j2I2ygi3M/LP2EZQY/qL4rmiWvrY7pm2dV3Xh8SceMld4nWTvFOadNU9ErVu4rkTGCPFs7mjYMzXskabvARLQSr0fUGKPVVRa2DY+5iSd2O8ewPawBja1fXjtVEtCPJyJSrAnWeGNOG36Z3YcEcd2sZ/zj1nX8b9z/dwL0qivr+9kfH/rU3kYPAozGNmib8TyLpx9AwpUP2LijhWgr113NG9Pssf6sGWZ/ry9e0daFvXHw9XJa1KQeGf4Kr/5r3xxhs8/fTTvP7663To0AFPT3tJoo+Pj5NnKlxtknNK+HRDPL8eklnkoS1Debh/I7RqFftT8rj7+73nHew3xGYSE+zJqon+uM4bLt41DQc6P7g1HqDfyzL7bDFJgSPtoBRbrKi10Pd5WVyN+hLO7JTo0aAm0skxFMOvj8
pNvcfjtsQKr1Ap/JzeLGkZXR+CFQ/LQg7khu8RIEocR6x7RZII5o+rqcxpPFRGoDxqSW9Sa+WrNjwCoe1k8etRqUWNo6mcMQ5qKEbK5YUw7TdZKGYckzGssJZXbSxFQcERKbmlmC2XmViVFSed2KtEiIcakwXSii1E+Vyin49XiKLIUVD4q5QXil+cM/bOhqbDan4wv1jUallTOIoAB/GmKzgDOi9Ycid0uV+aIRnHZC0Q0lxGqfMSZczLOvpcncCGsPc7WRt8d1O1EAZE1Zt2EKxNLJVaikMqFaUmLdfbx36LSo3KK8R+ZKwqQQ3h0AJRwDQdbksHq46xXNZ17adJMSamP/jWhQ53iprn7rVS6DuzR9JHGw0GC6K0PrlKjnFwgaz91vwHVevxWNQ6LHetxZz0J5qkLZiDGmNufgvluOIe1R2tiyuYKvD3cMNFreLTSe0orzCz4sAZxgWexvWH284329Sn1hOl/YQPRi9iWqkXJ9KLz1/6lAXxbHlmBGF39ballXpfHTXptYpKpbq8sWoFBYW/nateyBk2bBgAI0eORFUl689isaBSqTCZri8zuBuVM7mljPnyT3JKbHGCKw6lsf1UDj/d341P1sfj6+7CzW0iCPXWkZRTStcwC66/PiQjRBlHof+Lzk8QM0CKLJ7BENoC4tZKwSZho6RK6AtFgjvgv2JevO5leV5oS1GohLaQok3OKSnghLaUGfXCs7Bnpr1JcWasKHQq5bYAeATLAsuZv07mcfGFmTAfdnwGZ3bLtXa8W4wNVz8vC5d63R1Hp7cab1+QslKno6RaNR0Bg14VWXhVSnPlGnVeUuCyzl4HNJDFj4LCNYA1ejzM1/3Snmgolt/Nul2uwFU5JthD7jNnisxE+VyiCNUz2PmHEwUFhYvDYhHfOWeYKuSD++XiHgANB8noVHXqdoUGvUR1+/ODUlT44xNRhvR7CTJiYf2r0rDJjhdz342v1zyObx1ZRwTUl3XDtN9g89sSn+3uL2EAke0l/MBK64mQGYvFOwSdW8RfeIH/ECaDvB8bHIwW+UTK2HW9njKOvvp56Pei+OoYimvu7x0uKaM9HpNxunljsEz9BZXOA9a8IL47fvVsQRA3vWuveDm7FyLaybpx7xxUe+egbzWZH4MeYcjAcWCuILgsEa91T6BK2i4Fl1bj0fR9jr5Ngtl4IosXfz7KjxOiCFkzvaZi2qgndO2DvNB7EdOW2K5fbzRTrDdCyL+7eKOgoHBtctULOZs2bbrap1S4RExmC8sOnLUr4ljJLTGw/EAq745tjb+HKwt3J7NkXyrNI3y4Kdod1h+RHc1GUaR0vk+UMVVx94cBr8C5w2JurC8S9U5UpVz5pndEWpubJHP11iIOSIEo46hEeU+YKxLn3NPw6yMi6Y0ZIAZ8q5+3n7vOOCYyXRd3KQ4FN4Wic6Lk8QqWMa6qBRmVSlRCKqDXM+DmLR29Xd+IiWJYSxnBGjtbFi6n1sliVa2RIk6X+2HWAPvX7eIOg/9PDP7aTJTIciuluVIs2vqOJPpEtIW+/xH58b/ESE/h+uF0VjHuLhr8PS4xpSIrDrBIN/YqEeSuQoUUci4Zr1BI3Px3X5KCwr8Ld19Rn57Z7Xh729tkXXC5uPmIgvXcAZuaxppylHMKFk6UZMyq0eemCkjaKupeq+o2+Q9JverxuIxjW8eEItvDiE9g0xsyOqVxgYML5T496FUZpdo7W0aprAQ3lTXHoimoWk/As08DIOTyX+M/gdkkBZHez0pDy/p+RLSHkZ9KISdhI/x4m2zbMwsGvQYrn7JXR6m1MiplMsC5g1JIG/6BvMdeoXBqg+yXGWt7zsqn4d7NcHqLjLx7htQY83I7/hMj7nyCE1nFdPXOQjtnoC2d1GSAg/MgaRuBU3/j043xAETpSpxHxheeI9qj1O4hb50Wd1cl6EFBQeHa5KoXcvr06XO1T6lwiWQWlrPueLrT7RtPZNE8wpcXlx/llZub075uABpTGZiqdGHcfKVg4REIY2bC0SXSEavbTaI7N78tapbsk7L/gbniVTN+ruyrUovqZucXzi9Uo4Mt70qnxsqBuXDiN0miWjjRpsDxCoE6XWRxl7gVNrxqe46LhyxK1C7i3QMi7Y39Vbp2TYbJ/Lw1PUrjApMWSUrDT3dCx7ug452yMNS4gHeEHGfkp7LYK8mEuj2g+c3i09NwoHjjGPWiBjKUymu2pmCAJEXEr4XblkCjWkbUFBT+AU5nlRDu62anqrwosuKkMOkZdGUuzAEuGhUBbirOXFZyVahE6Rr10o1XUFC4ZIwmM9qGg6QxkR1vv9G/gYQLqP+iZWNgDNy7SZoqJ1fLOPW6V2TUCaS5VJ3ErZJqtf97UegCbPsApiwTRbCxTO7p6Udh+QMw5hvxxjv+i0Sob/9IGlXDP5BClcUs52kyTLxUVjwixzy8CFWPx2qe/1rHYhG189l94mWDRd6PzFgpipmBqoEWiVvFE+ju9aKMzo6X57eZJOu0PbNkLF3nDZ4hqCLayRrO2bmPLpG13A8jod3tsOwe+31MBjLzi0nNr0C1701bEacq+cmoz+zCSxdCBnpUFgc/B1VQVRulv79PNCHeyhiRgoLCtck/4k61bds2vv76a06fPs1PP/1EZGQkc+fOpUGDBvTs2fOfuCQFoEQvxsYlehPeOueddm83LXmleh7q15D0glJG1TWg3fYeLuo+tgSDoEZwbj/8+Zl8aGs6AoIay4Jox+dyk249wf7A+iJJkGo4QObVGw6Cm96D9f+zT4kAkfWW5dkXcayU5YnpYItb5L9aN+kghbWG2BWSlFGVilKRXE+YKwWYiPbQ6R746Q7ZnnEMsqp0ikwVsPVdUeNsektez47PpfDU9wWRCSf/KQkYLW6B9ndI/PjJddD5fpF5mwwSW7rp/yQafdsHNV+HxSKpXvesE1mygsI1QkJWMWGX5Y8TK4lrl1oA+osEe6guX5GDRf6mBcb87deloHCjUmowcja/jJ/2ppKUXcKAZiHccttyXGKXozowV4oebSdD6/E1Df0vF78oaaw0Gw2nN9iKOCCJSd7hUJRme6zREIhbLbHZm9+W/bs8IIreM7tqHn/RFCksLJkmxYo/PhYlys8PwpSlUrRQa+DITxJiUJWc07JGuJ7wCJS1l6NRM7VGCmfVvYm8I8BsgZ5PiaJHXyhqHpNBxqq0Ohk7LzgD3mHO/YhAGl0VpTDsfciOq2m8HBhDUhEMinZHs32r08NoT/xCq4gnSMgqJlflR4SLe01TZgCdN1lmbyAPnVbN3T0bMLx1OO+ujiO7WM+wlmG0quN76SPF1xjlFSYyi/QcTs2nuNxIu7p+hPi44e/heuEnKygoXFNc9ULO0qVLuf3225k8eTL79+9HrxfFREFBAW+++SYrV6682pekgCy61hxL56mfDvHN7R0Y17EOO07nONx3XMc6JGaV8NWWBHY/2BD374dI8STnBPT7D/zykHS3rEkEJdlStKiKVuc43vvkKkkD6HIfJGyCY8ug2Qj599qXRCYNENVFum7OOLkGBr8Ox1dIgcYrD
ApS4MB8x/ubDGJqes9G6TgVnpW0q5IsKQa5etnvn7JTZro73QPBr8gHvqRtsOZFm6dG4TlRDeWnQPxq+SAY0hySd0jRau2LUuTKP+M86rzgjLy3SiFH4RridHYJ/Zpc4piA2STeUw2uvioz2ENFymUpcipfY16SUshRULhI9BUmNp3I4uGF+89/xl97PIM33V347eG7iGozUR70CJSCwN+JoVTMhq0muVZ2z5SR7l8est1v63UX1a3FLP53vZ+WJtHmtxwfOy9R7t2RnUR5M+oLWPGo/L9RD7ucRHUDJje/f6Zz+lcwFMHOrxxvM5skbSyygy0GvPFNEN0HsipH2be+L542Vk6ugTqdRcGUHS9+gXU6idGxI+p0gthfsfR5FlXSthr+O1k9XmXtSRUNgowEuvk5TdgyewYToRG/nS/2FPNWz//it+n5mvsNeZvQyHr8/khdvNy0xKUX0e/9Lee3Lz9wlphgT+be3YUIv+uzmFNqMLLpRCaPLzpIhclWgLu5TTivjGhBsLeiPFVQuJ646vHjb7zxBjNmzGDmzJm4uNhUHz169GD//v1X+3IUKskq0vPdH4l8Pqk9gZ6uqICbWobV2G9Ii1A0KhVqtYpBTYNwOzxXCg0g3jHx68Ug2DtcjOmcLdJa3FJzoQXicVOaDQsmyALr2HL4/UlZfA19W8xHQcagtLXcSLWu0h27a7XMZJ/ZAe5+tUeLFp4DixF+fkDGshZPFe+crg9KAWbstzDue5kBD28rxZ51r0iRKuVPiOoqC0P/BlC3O0xaKHP/82+V17H1ffimF/hGwB8fyYKx+6OVXf9aUCnz2QrXDgVlFeSWGIjwu0RFTu4pqCi3xbheRUI8VJwpugw3Vc9g+f1TDI8VFC6azCI9Ty4+WCNEqqCsgkcXHSJP5SdF0r+7iAOSsJS4VbxdqpJ2SGLIJy2UoAL/BlJI0rjKvX/j66K4KThT+/HNFTDwf/D9SBm/nrgAhlb6+kU5MXF386PU429SHV1NVJraFTP5Z6DX0zDkLWmCDXwVPILgyBIoTLcv4lhJ3S1fwU1kTTb0HfEaqo5vlIy3lRWgOrgQy+5ZEkEOENSIrFHz+ep0MGUVRtYkmclrfU/NY1RibncH4zpGEeytY1VsLj8UdyLjlsWY63QFzyAs9XrAHStRN7uZiAAfWkT6YjCauW/uvhrHSsgqYcaWBAzGy2gMXAOk5Zfz8MIDdkUcgF8PpbHqaBoWZ8lvCgoK1yRXvUEQFxdH7969azzu6+tLfn7+1b4chUri0ot4sF8jnl1yiN6Ng/HWaWkZ6cvIthFsj8/GYoGejYJIzilh+6lsKkwW+kRp8Tq+xv5Ax5bJoqj/i2JON/Jz+w4YQHAzaHkr/Di55oX0fBzm3lJToVKaI+NMHe+S7k3bSeKjc2iB4xfUdgokbRezu5Dm4s1TkiPxoVZVT3WiusDKZ2xRm2qtSIZdPOHgfNscfVgr6ezpfGTsyr8+LL5dxseGfwSlWbLA2f+9SKyrYjaJXDioMbSbImbPvZ+RRaCxvOY1hbaQmHQFhWuEhCzpiIZfqrw8/aio3XyuXvS4lWAPNdllRsoqLLi7XMJYl1ojHzjzkq7YtSko3GiczChC7+SD7oGUfPJKDfh7XqExjopSMTEe9Jo0UKpiHa0e/jE0vRnKCmQtUtWfTq2R4o4jvxUAVHL8MTNg6T1SHPKrKw2Z/i/D8vugqIrHoNYNy4R55KgCMBTpCbqeFA9aVwhvI0UwR9TvCds/hrJK/8O8REkP7fWU+Ac5wjMYAmLEA/DoUin6tJsMfV4QD5yKUhl563SP+BB2uReMelRb3saUeRLzY8fZeDKHdzfmkZCVQ5NQb9pF+RMXMJD29dbjmrzF7nTFvV4ij2DiMopYfF9XdiXlsi8pj+X5DRk2dA65BQWEBPoTHBSKi9bW215zzLlP5OK9Z5jeJ4bw61CVs+LQuRoFViszNicwtEUYIUq0uILCdcNVL+SEhYVx6tQp6tevb/f49u3biY6OvtqXo1BJmK8bk77ZSYnBxNpjGSy8rysTv9mBTquhfV0/AJ5dcpjyChOzp3Xkvrn7aBkQIjHZVen+iNyk1/9P5LOpe+G2RfLf0hxo0BeCG8P+uTUXSuFtZQzLUUEDJPFi6LvyvKV3w+A3xUTv0EL7/YIaSeTowolSEMqKE3PC8XOlk7ZoSs1jB0TLV6d7pNgS2Q7KC8Ubw1gmqRUJG+RY6UdgwXgYMwvO7JEPpqYKiSUvz6scK/MRD6DWE+DIYlmQWCkvhA53wKLJcq49s2UMbNWz9rPmrl4y3mVVISkoXAOcyixGBZeuyEk7LEVPzdUfMAitjCBPLTbTyP8SVQBeIZJKo6CgcFGUGUy1bjeZr0DXvyhd1DS5ieKt5xkkDZ2D8+z384kEnYckLVksMGGeqHczjsr24yug/VQx5q1Os1GiInHzFbXKtN+kUZVzSkaoVzwMU5ZLsynzuKhKmgwFfREeOi1G83Wm4lC5iMpm7uia23wipUlWni9jUqc3y2jViodh6grnhbCRn4rKOve07bG0g7L+u2OlFM0Tt4giGmS0PusEBESj2TcHdbtJlLgEkJAlyqm4jCJiQjx5dHkiL/Z9jU6dCvFOWYfZxYuCuoPRu4dw57wTpOaVodOqmTGlPb7uLqw+ms7bq/IB0GmTWf9kH6ICbHHnBWXV4smrUF5h5kr8CF8NknNLnW7LKtZfmd9NBQWFK8ZVX1Hfe++9PPbYY8yZMweVSsW5c+fYsWMHTz/9NC+//PKFD6BwRUjJKaWkcvHlqdNQYTQz545O/Gf5EbbGS1RjVIA7zw1pxZw/ktAbzSw6Wswt3e8l+Ox0OUj9ntKV+u1xmW0+sRKaDoM9cyTq29ULUnZBxzukA9N4MOydI+NOzUaKSV7Vm7sjClJsnZ6Dc2HYB9ByrKRVGYqhxRhJxfn5AXtVj8Us1zV1hRRHNr4uyiGVWro//V+EeWNELTP2W1j7ssizrXgGw6jPYf2rskCzWMSgeOD/ZNFxyzfSSZo1UMwUtW7QapxEmfpHS5ypNZrUUCKFLHPlYvfUelkcTvpRTJ4LUkVS3HKsmDdeTUwV8v3Q6GoW6RQUEEVOsLcOnfYSCiImA2QcgZh+V+7CaiGkspCTUng5hZxQ+9hiBQWFWmkW4eN0Wx1/d3zcHYQpGA3iyaJ1k3v4pZCbCAvG2SdiBTeR+3KjgXB0WaXSY5CM/vz8oK1p8stDcPOn4nFz9Cfx2OkyHdwDxPNGXwgu7tBuqqxZ5gyR+yTI34ZbvoZVz8i581OksPHHJ+BfT+LMU3ej6vE4bloVqn+giP2XMBRJU2rUF7DlHXl9KhVE95e1T/pRGPBf+HGSvG6dh4xilWRK8md175v6PaX55Widl3ZQ3q+9c2xppiq1rIfSDomKOXU3KouF/k1DeOfWVry7Oo6cEgPvro7j00nteP2347ycW0bziGG4aTXcHBBEcnIxqXlibKw3mnlk4UHeG9eaWdttf9P1RjN7knLtCjkDm4Uyc5vjv/vdogPwcrs+R977NQnm5wNnHW5rE+Wn
RK0rKFxnXPW7yvPPP4/ZbGbAgAGUlpbSu3dvdDodTz/9NI888sjVvhyFSs4VlBHqrePlEc1pFOqFxQJqlYrZUzuiN5qxAMfPFfLJhnjiM2W0IiGrmNzgzvg3uglt/CrpYq2uNJAry5UFwOKp0PY2GYUym2RhsO87WPdfifKs200UPCHNZBSp6XDnF+kbJZHmVhoPleJMn2eg34uSHOUdCvNudfz8kizxunDzk5luqIwLD4dvh8mCrfUEiFtpX8SxPnf5AzD8Q1hSGUOeeVwWGgHRMgI1o7ttgWcsl+JSboJEkQY1lGJQSZYs/s7stD/+kSXSCWw0CFpNgDYTrm6yj9kk782e2fLavUKk+KSMdilU41RmMeGXqsbJipUPSoGNr8xFXQA/NxUuai7T8DhM1IAKCgoXRbCXjmnd6/H9n6JkaxTixV09GxDo6UqApytmswWT2YJGrQKTUVQYu7+RD/7e4TJiHdwMPPwvfLKSLPhpWs1Y86w4+P0JGZ1qORZ8I6XBU32/8gLY/4MYHXuHyz184Xgx8b3jNykSeQRKIXrBeFsDBqTxs+xemPwTfHsToBKljn892xoiKw7LgP9xLK2E7o2uM3WtxgVm9oWgJtDzSfH8U2sk7OHkKhlfH/Sa7BfZQf7G63xlnRbYsOYoe3TfmuPmVYlbKWbJ1kJOTD/52+sdDjmnMDe+iVyTB7lleno3DqJzg0BK9UbcXDUEe7kyY0oHCssrMJrk5+vrrQmsOZZhd4pivZGiciP+Hi7kldpUN2azmbSCMgxGMzqtmkahXnSq58+e5Dy757toVLw0ojm+7tdnwlPH+v6E+ujIKNTbPa5SwX9uaoafklyloHBdcdULOSqVihdffJFnnnmGU6dOUVxcTPPmzfHyUrr//yQd6vnTr0kI3/2RxDNLDlNWYSIm2JOH+zVkV2IuTcO9aV/Xn27Rgbhq1QR4unJ3zwYklpvIbPEKrTs+jI/WiKo0Vw6YkyAjTmaTpEXsnml/wm4PA2r5kJSfJH4zgQ3h8E/QZqIkRYHIdyM7SEe/xWhbgoJfXSmgGIrlOOYKOLwIYvrX/kJdvaTbN7NSGdDsZnAPtJkgNx0uCzNHlOVBRYks6kpzJHlLXwS/PyXFmoGvyuvsfK9cn8VS6YfTEP78UhaUu2aILFvnU9N42WQQRU5Eu6sez0xmLMwZLGohK6fWi5Fh90fB3ffqXo/CNUt8RjEta+m4OyR1H+i8pdD6D6BWqQjxUJFSeBmyca9Q6TaXF8jvroKCQq34uLvwaP9GdKjrz5b4LAY2DeWN32M5my/KiABPV968pSW9GwXjkXME5gy1jVSnH4b4NTDgf3IvvZAytCjTuYfL2f0SXe3mJ4Udz+CahRyNi5zHzUdSlYorP/jHrZJ49CV3yFj22pftizhWSrKk2DPue1H9pB8RHz1rIUelArWGnUl5118hJ2m7FGfSD0vTrCoeATDodRltH/quhFS4esC4OZIOmrgFRs+As3tlbWY2SXFOVUvGikptU0ppddD1IVmPjf0Wds8kZ8An9PpwB+UVZgI8Xbm/dzT5pRUsO5DKf29uQa9GQUQFeHAyo4jBHzmPI88rMeCp05JXWoFKBa+NbEluaQUjPt1OTomBYC8djw5oyMcT27L8wFm+/zOZgrIKejYM5JmhTYkJvkTF2DVEpJ8Hi+7rxv9WHGNLfBYWC9QP9OC1US1pGub9T1+egoLCJXLVCznz5s1jzJgxeHh40Lx586t9egUnBHi68tTiQ+xPyT//WEJWCU8sPsSH49ugAkZ/8Qf9mobQt0kIId46dFo1+goTFbpAXtxbzn+6uhBR9aDbP4ZbZsAvD9tFRhLdHxoOgt1fQ/NRENpKfDOW3yfbh70P4e1EFWIoltlrzyDwDBVD4brdRLa79V0Y8YkURHZ8IUUZlRr86oFvHXk8/YjtvN5hUoAxVelEBDQUSW9VjPadCjuK0mUBU5pjn7wVt1LGxYa9Iwu+rLjK40fDTe9AaDMpSo34SKJK206CXV87Pkfjoc7PfyUozZWZ9apFHCvb3pfCmlLIUQD0RhOpeaUMbnGJBZmUHRDctPZF/BUm2ENFcmHt3h0O8a5M78tNdJyuoqCgUINALx0j20bSPMKHYZ9sx2CyqeFySww8MG8/+55sh8eKRxz74m18TdYHtRVy8pJl3Lo2SrLlvm2xQOf7JVHy0AJRjdTvJUWcXV/LyPeoz2W9Yi3mJP8paxWzSe7b1QlrJU2Z0hwZR07+UxoxbW4TX7ySLGg6kgq1GyNaX4cfknMSnG8rzZWmVu5pKbikH4GNb4LFJGux6L5SzNHqpKCTdULuA01HSOPIEc1Giiq48VDoOl3WYkPexFKajfHuDTz7ey7lFfJzlFti4K1VJ/jfSInMfnD+fubd05meDYPxdtMS7KUjq9jxWi462IuMQvmZG98xirj0Qubtsv0cZRXrefmXY2QW6ZneN4ZxHaIwWyx4uWnxdnMwFnidUT/Ik08ntSOv1IDRZMHbXUuIt2JwrKBwPXLVV9VPPPEEISEh3HbbbaxcuRKT6TIW1gp/CyV6I2fzSjmbW0Jmod6uiFOVTzbEU2GyYLbAhthMdifm4KnTMGX2bu6ftx8zsD0hm98TKjDV7WF7YsoO2PstTF4ihoIjP4O710GrW0W6HLdSUpv0haJEsVjka+XTMt/+xyew4hFJNtg7B+bdIp3xjndLl7zNbbJQ2PI2HF0i3TNXr8rCSQtZTEz+SRYFai2M+Fjm78GmeHH3l4WXlYoyecwZATFQmCYmfy1vhWM/27adWAkHf7QVcUAWOQsniQlg05HQbpoUqNpNFaVRVVQqieJMP2r/eEWpSM/Tj8jx9EXOr+9yKM+HM7ucb0/+8+89n8J1y+msEswWqOPnceGdrRScET+qkGZX7sIughAP9eUpcrzD5b+KT46CwiVhMplZsi/VrohTlfLCbPtmS1UsZji33/nBC9PEhFeldq5gVamlqWMyimondbesS3o+KWEMfZ+HdS9LkAHA6hdkVGjKcjFMbnGrqE7UGns1XuMhsrZodrOsNfyiRNmjUsPP0yXF8pYZ8rej6wOozBVo/sEi9mUT1dn5tsAYUVn2fk4CHBZNFmPpQwtl/Pz4z9DhTtj7naRa/fa4jL+Ht5Gifo1zdZVt3R+W79miKVJAO7kG1ZK7cJk7kld6euPmYv8+ztx6mkmd6gLw1soT5JToCfV244VhDs4B9G0cTHJOCRUmCyoV3NIukoV7HEfOf73lNDnFBkJ93Qj3c78hijhWfNxdqBfoSUyIl1LEUVC4jrnqipy0tDRWr17NwoULGT9+PB4eHowbN47JkyfTvXv3q305/1qyivT8cSobD1cNZQYjZyrN4ByRnFOKp872o3JnjwY8u+TweXf7t1ae4OMJbZmzLZFBvd+h/m8ToPBspc/KYxLtmX5YRppMBuj6oJgJ/vExtJ8mxZCqJoAN+kLCxppKGRDlTVRXmdF29xUDvDO7JC7UYoa1L4l3jRW1BkZ9KXGY296Xee1b50jn5/gv8u+2E2U+31AMBxdIJ2jTmzX
PHdJCTImHvy/duaX32iczWMyyAGk2UrxuUEnUaeyv4v8z/CMoyZDCTnk+DH5DznVmt8i6o7qIBDx2he2YRemw5V048IPM7qvU0Hw0DPk/8LHTP105qkfBK/xrOZkhRcQ6/pcQu5q0XbqygQ2v0FVdHKGeKracMWO2WFBfyuiizlsKxLlKIUdB4VIoN5o5dKbA6XZnEeXncaSCsZITL42NlJ0ScnB0ac19Wo2FrJOSlOnmI94rDfrAuYNy7/cIFLXIkZ9EFWQsh+X3S0PFrx4sv0fWDv7R0P0xUQk1GgRNhksqpnXUavc3UpwY+Rns+1au69hyuG0x/PEp6q7TKTJf4jjqtUBwE1lnFJ6rua37YzJWXnhWXmdwE/smVsJGGc+eOM/++b89LubSOafgxG/SZGt3uxTjvr9ZGldW4tfKSDtAURoRBz5mQpv7+H5v5vldzuaXnY+yP3auEH2FGbWnigHNQvlqcnveXBXLmdwyvHRapnWrx9Tu9TFbLPRpHIyrVk1WkfOkJoPJTG6JgVAfNwxGM+6uGvF1UlBQULhGuOotAq1Wy4gRI5g/fz6ZmZl89NFHJCUl0a9fP2JiYq725fwrMZnM7E/OJTrYk4zCctILywnxcV6Rd9Go0FT+pOi0aowmM8V62wIrIauYZ346TIf6/qQQSsntqygduwDTrd+Kuub0RpmfzkuSG/ral6SDFdZaxo0SNkCdjrYTNhshkd3OOLlauuMB0dJpc/OThdyhhfZFHJCF1i8PSeHk5BqRCv/xEXS5XxYPyX+I9PrWWRJBmrhFVDt9nhPJNMgCo/FQGP89ZJ+S6//lYTlmVVqOlQ6dRwCse0X207hKGlVRmhg9l+XJwseoh9+egJXPQMYxOLURFkyQ/285VtK9Di+WhePe2TYTZYsZji2T81v9iP4qbn5Qp5bOW/2ef895FK57TmYUEeDpalfYrR2LLOhDW0jH+h8k1EOF3gSZpZeoylGpZLxKUeQoKFwSVtNYZ6SUusp4kiNUaikilORIEXXnV/D709L4Kc6As/tkP88gGf9tO1nutyCF4/bToPczogyZPViMjovSZHt4a2kuLb1bzHwzY0VVMmaW+Oj8+YkcNzdRlLZxK6VQ0fcF6PyAjCJX98vJOiEeeS3GyL+P/CSNJL86qM0mAjyus8QqAFSiTqpXZQ3gESiNpOyT8pr1RaLA6f5ozacf/lFSpwIayOhZn+eksVaQKn45g14XtbJ3uIRJVC3igKylDDYFsi5uOaOa2K9VPV01VFQqvkJ9dOcLLb7uLtzUKpylD3RnyzN9WftEbx4f1JhQHzfCfd1pFOpNvUBPPC5wL1Or4Nklh7j7+z18vjGe5JwSzEpEt4KCwjXCP3pn8fDwYMiQIeTl5ZGcnExsrJO5WYW/lcwiPXUDPbnj291kFxuY0rUu/ZqG4qpRO5RAD28VTmGZFBI0ahUVppo3saxiPR+tFxPBnx7oxv82+LBguBbfgf+z33HPbCmW/PEp9HhUVDAmA6hdxBw5O14WYY78WqwYSmSfjFho2B/GfSdJEfPGON7fbJSFmvX4sb9Ci7HSaZ84H1DB5rcksSrgeSmWaN1g3LcSQVqSLR/mEjZBSFPpIFWn4UBJxZg/VhYpVg4tlK7U+B/kGjU6uGuNjHB5Bosa5/cnbPu7+co1LrsHJi20mT5XJ2GDzN//HYlSHgEw4kOJVa3+vvd8CjxD/vo5FG4ITmYUU8fvEtQ4WSeleNvoKvs+OSDUU6rRyQVmwjwvsYfhFaYochQULhGtRs2ULvWYtzMZR599/YLCYdgH8MPImj453R8VtY1RL/HW1mbGnpkQ2VGUvWot+NcXlWvr8TB2jhRYNC5SBPqmn82fz1AM2z+CiM2SmBndT8a6LBZR/8b0kzCDMTMr05m0sg7QF0lRaN6tMHGBmPc6Uwod/1mef2ihXHdFGTQbjao0E3ff63C0Sl8EOz6DFqPgprelEWbSw77vpQlmpShd1i4qtb2C11Aso7UBMVhuX4bqj08kxtyKuz+Wsd+i8giEm96V74+12KZSQd2uMn5vxWys0X2+tUMdfjssz7mvdzQh3jq77VWblBZLzR/CQE9XogLcOZNbU5UeE+zF3uQ8fj4oiqJdibnM3JbIkundaBp2HSqsFBQUbjj+kTtLaWkp8+fPZ9iwYURGRvLxxx9zyy23cOzYsX/icv51GM0WPlp/kvqBnnx7RyfUQLnByBeT2+GisZeNNg71YnS7SHzcXejZMIhSgwl/T1e0TuSlgZ6uuGnV3NzMB+/i06IcWTxVvn55SNQ2bSZJ1LVHkKhOOtwBa16QSPBW4+DsgdrTp1rcAm2nyI1+2X2weJosrKqOOVWnLF/GIwJj5PxeITLqtfYl2Pg69HxCunl/fgrbPpS0C48gMTH8cSKgkgJNwVmRBfd8Qua563YTc+YOd0qhp2oRx0pJlqgSijPgh5ulYDJ3NHzTR+LJ71gp+7m4i4+I1fRZrZXiUptJjseoijOgwoFJJIDJJItIBwsXhwQ3gwe2i2lhWCuIGQDTfpV5dcXoWKGSk+lFRF7KWNWJX6WDG/jPqy1DPORvVvLlRJB7h8m4hIKCwiURFeDO17d3xLuK8kGnVfPaqBZEB3tJ4WXiAmg/VVSxDQfCrbOlKBDSTKLFrUUcK2f3SuPEt4588DcbZSx60RTZ//QmKRhUDVmwcu4ApO4RL7px38sI1ZiZcGSJFGus9+aNr8O9m6Bed/G9Gf6BNIRKc5y/2KprkHrdJQzBVE6Fxh0Tmr/2Rv4TGIpkRGzlM1Ks+WmarLmqFnGsVJTWVF3G9Iczu8hV+2OOXy+jUlUpy0O1aIoYVu+dDaO+AN8o+d4PeUu+p1XWMMbogWxOtr3HvRsF0aVBIBtiMxjVJoIhLcJQVRubzSsxcOhMPv9ZdphHFx5gQ2zGeaNjgFAfN2be3hE/D/trD/R05eURzZi51f7vfrHeyAvLjpBXWst6U0FBQeEqcdUVORMnTuS3337Dw8OD8ePH8/LLL9OtW7e/dMwvvviC9957j/T0dNq0acNnn31G586OR0VmzpzJDz/8wNGjYijboUMH3nzzTaf734hUmMwkZBbz9JAmfLnpFLe0r0OFyUKglysrHu7JvuQ8MgrL6Vw/gDBfN/Yl51FhsvD8TU0p1hsxmsw82DeGTzeeqnHsF4c3Y/nBs9zfWI964YP2hQR9kSwIJi6QJCqTQdQ5PZ+UD0mLplQWaSbLguDU+poKkdAW4ovzy4OyILNSmiuPZ590/KIbDpR5+fSjYoy843Nof7uoUQIbw6z+4uHRaIic+/QmMVGeslTk2WoNFKdDZHuRWeckyDGNelmAjPxcikDOiP1VRpiqzoqbjXId/vWlWNNoEPz5mXQBb3oXDKXyulRq6PsfiVhf/bwtVctikfes1TgZf/KNhPIiKZLtmQX5KdJlbDZS4tBr8wXRaGVUbdCrYj6t0V049lXhqmE2W1D/E7P5JqN4TxxdSlnOGVJyX2SIXyoYgqUQWhslWeKP03AQqP/5brSrRkWgm4qUyynkWH0iKsrBRTGGVF
C4WNxdtfRtHMzqJ3qTUVCO0WwmwtedIG8dbi4a8AqGr7pBk2HQZKg0XVY9K8qOwIbO1bnnDsCw9xynTNbpBPt/cH5Rp7fI/fSPT0T5uvSemmPZJ1fL77pHkBSOFoyTe36Px50fN6ixFJY0LtLs+eUhLFlxmIe8z+7TOYxsV+dCb9c1hSp1j+0fFWXg4lFz/AlkjeLub/+98AyS9dSa/2AxGdDsneX4JIZiaZBVlMOvj2Ea+y0qkwH1rhn2foGunhgHvEp0ph+vjQqkVaQvRpOZo+cK+OWhHui0aiJ87ZsMeSUGPt0Qz7d/Jp1/7NfDabSM8GHWtI6EVe7fJMyb3x/pyZGzBcSlF9Es3IdIP3ceXniAcwU1m2UHUvLJLzXg7+F6wfdQQUFB4Upy1Qs5Go2GxYsXM2TIEDQa+w7F0aNHadmy5SUdb9GiRTz55JPMmDGDLl268PHHHzNkyBDi4uIICak5ErJ582YmTZpE9+7dcXNz45133mHw4MEcO3aMyMjIv/TarnUMRhNx6UXklBi4o3sDMgvK6ds0hA/WxpFZpMdbp2Vyl7p0jQ4kObuECD935vyRyO7EXCwWiMsIYHSbSD7dnMDQlmF8Oqkd32xNIDWvjCah3jw2oBGYK+jQTE0IpVLcODi/ZurRoYUw4L+SHtHnORmTaj5aihl5iaBCFnIT5knqVcIGWUC0GgcNB8joUdUiDoCLDno/KyNJIGNTzUdVRgdrpYiy7D7IqJIIFbtC5tmDm0pXMCvO3qwPRJ3T9UHp0oF493S+TxZpZXlyXU1ukqJKbR9sXT3ED8gRf3wMU1eA1l0KNTd/LCNoVZOk4laKSeOw9yXJK6S5LDxPrZOvgGiY+gskbpMil5WEDbD1fRnnCnGc4mCHxkUUFArXBDtP5/D2qhMcSS0g1EfHnT0acFfPBlfHcDH9qPzOZB6D0Jac9OqMBRV1U1fAsnliZB7VxfnzD/8oKrc6na78tV4kYZ4qki63kINF/j79w+lbCgrXGy5aNZF+7kQ6Gsv0DIZW42WtUBXr75wz8pJEIdPxTmmG5CXZthkNcm92VOQBuVeXZkPydmmYVC/iWDm+QtYheyqLEHU6yzGbjhCfPxcPWYuc3iRrgL7PQ+JWmLxU/lYUZ6IqSEVlLKduwPWnbLXofDh/pzn8I3R/CDKOi4K3PF8CI0qyoONd8l93f1lLNR8pqZ6/P0VW7zfJL9YTWFvaZuFZ8AyEs/vJLSjmuCGEFlGDCMpLhLJ89PX7Yej8ELFlARxLy2HD8UxS8kro2yiERwc2xM/dhTBf9xrNjqScErsijpWj5wpZsi+V6X0bolGrUKlURPp7EOnvwdCWklK4dF8qidnOR/wvVuysoKCgcCW56m1S60iVtYhTVFTEN998Q+fOnWnTps0lH+/DDz/k3nvv5c4776R58+bMmDEDDw8P5syZ4/T8Dz74IG3btqVp06bMmjULs9nMhg0b/tLruh44nV3CmK/+JDatkBYR3mSXGHh71Qkyi2SxU6Q3MmPraZbsT2Vy17pM+GYHeaUG7u7VgHt6N0BfYWJLfCavjWxB5/oB6CuM/PfmFix9oBuP9G9IlGsRnZO+pt6igajmjoL1/5MO1sBX7S8k97TMPsf0h6Q/xLxXq5M5+O0fQfw6oFJt4hEoXbd+L4raZu93UtSojskoBY1bZ0GvJ+GWr8W88OQaKEgSibB/vZrPO7ZMFh5D3nL8puWelhlvKxYL7Poa5t8KCZtlxv674aIsajXe+ZvfegKU5zneZh3HcveDztOlI+koDjxxi6iYGg4Qs8Gqs+O5p2HHl+ID1OUB++eV50tSRKmT8ytck/x66ByTZ+6i1GBkStd6NAnz5q1Vsdw/dy+GC6W9/FWOr4BZA6T7OvwjGPo2J8JHogLq9JgM3hGw4TXxSrCYaj4/6wScXCvFx2tIwRLiqSIx/3IKOZVF2JyEv/eCFBT+7bj7wcD/yTrB6vnmXx8GvwmRHZw/L3UvNBoo64QRH4N/A9u2E7/JPdcZDQdIg8nVG4rTnO9nMdv8cEKaQ3RfKRi1Gi+F7vh18vjUFXD7zxDeThoyvz5me15YK8oqzAT76Jyc5BqmXg+bkrcoDRoPk9Gn+DXiGTb0LYlh7/IApB2R6Pbh70PzW2D9a+QNeI+P4/xJKaL2lM3gpqIgBshL4uX1mdy2qx5zG33CLx1/4GX97fyZ78fEWXtIyCzm4f4NWfFQT2JCvLBYoE6AJ1qNmrIKE2dyS9mdmMP+5DzKK0x0rOfv8JTzdqaQXeyk0Ae0q+vndFuTUG983G+cKHIFBYXrl3/M7Hjr1q3Mnj2bpUuXEhERwZgxY/jiiy8u6RgGg4F9+/bxwgsvnH9MrVYzcOBAduzYcVHHKC0tpaKigoAAx6axer0evd72x76wsPCSrvFaoVhfwYdrT1JhsjB/VwoDmoUyZ7tj887fDqfxQJ8YHhvQiGBv+RB27FwBm+OyySrWYzLD8bRCNsdl0SDIk6nd6qEvyqVT3mdo45bbDlSSJR/2ej0tqpW4VfJ4aEtRyniHi2lhdlxlikFTSW3a962MFi2eKv+/71vbMVuMEbVNdSpKRH2CSsaMfrzNti39iCRIjPpCxiOqq3kOzIXo/qIw+OMT+21BjR373lgscGi+GC2DKH3CWtm/TisNekNAjIxXOcK/gfjhlOXKKNTvTzreD2RevdfTEpFanGm/7fCPMnqm1kK3hySq3UrKDukQludV+gNdf2NTN8rv4sVw7FwBT/90iG4xgTzQJ+a8AqdT/QA+Wn+Sl34+wrtjL73wfVEcmC9+VvV7ye+EVj6AxOaYCfdSofPylbHEpK0yppgdJxG9ViVXSZb4U/hEygeBa4gwTzW70yqwWCw1vBRqxc1PPKxylUIO/Lt+FxWuAl4h0P0RMSw2VcjfHO8wUb22nij3tup0fxiiukJEO1jxMAx+XUzJizPluZ4hoo6prrbpfJ+EDFSUSWPEO9z5dam1tnCDmz8VX7qkbWJqbCX9sKiMx30vY2AZR8EnXNYNWh20GEOR3oSr15Xpm17R30WtG/R/WeLV+zwP34+wH3VL2CBrjUY6WXuseBjqdscy7F3Kx/7Au1vyWXjgDMeyfenV92VcV0yveQ7/BlIsKskGwBwQzR3dowjzdUelgh0JOayMzaJfyyjMFlgfm8n62Ezu6dWA2LRCbm4jBaKCUgNL95/l7VUnzod2eOu0vHJzc/w9XVl3PMPutKUVRsy1yGqCvHTc3zuar6t55LhoVLw1phVBXjpMJjMZRXpKDSbcXNSEeLvhqv3nx4gVFBT+PVzVvzjp6em8/fbbNGrUiHHjxuHj44Ner+fnn3/m7bffplOnS5PgZ2dnYzKZCA0NtXs8NDSU9PT0izrGc889R0REBAMHDnS4/a233sLX1/f8V1RU1CVd47VCcbmRnafFpC81r4zCsgpKDA466ZWcyy9j84lMHpi3jwfm7WNHQg7vjG1Nmzq+bIrLpEdMEAAtI3zYm5TLTQ006KoWcaqya4YsxkBmqbs/Kh+KXNzEm2XtS7DmPzJu1X6aKGniV
klMZdVFVkQ7aDdFumnVObhQFmjNRsDq52puN5tg3X9ln+oYyyF1txgXW+NLrfR41PEiEuzTGeJWSoGpXg8xamw9QTx5bvlaIsndA+GUE9VX72dEQf51r0pzROddIox6WTwVZzjeptGK70697rIIq0rBGfisg7xX5QXOz3GNcqP8Ll4Ik9nCs0sOE+brxr29ou3GqNrV9efung1YvDeVXw6e/ftPfnSpFHEaD4HeT58v4gDE5piI8q68ZahUorbpeKd0ZpffJx5Re2fL6J+pAtredk1441Ql3FNFSQVkl11GBLlPJOTU9AX7N/Jv+V1UuAqUZEvxxWIW1YZ/vcqRaGRUZ/Dropj1qhyVD20Bty8XRa9XCAz/UMafXDxh4W2ihvn2JvG9G/CKbGsxBtrdXrmfh61hE91P7oUR7RxfW8sxouqNbC/jPy7u9kUcK6U5Mn7l6iHNn073wpmdMGYWpuIsDO6hmBwkgv4dXNHfRa1O/PJu/wW2vufYr2jHFzKqZh057fMsqpwE8ksMWCxSTBnVNoL8Ov0oG/yefE+tNOgjCp51r8i/A6JR+dVj8d5UHpy/n4fm7+dsfhnfTO1Acbl9Utixs4W0r+tPoJes2Y6cK+S1347bJa8W6Y08v+wIk7vUrRHQMahZaA2D46r4uLtwf59o5tzRkQ71/Knj784t7SL5/dFetIz0IadYz+w/Ehn6yVYGfriFgR9u4a1VsXZGygoKCgpXmqu2yr755ptp0qQJhw8f5uOPP+bcuXN89tlnV+v0Dnn77bf58ccfWb58OW5ujuX/L7zwAgUFBee/zpw543C/a5mi8grKK8z4VTFmq60TAfJhctspWzrD3uQ8Hpq/n6cGNyHAw5WBzUK4pV0keqNZuhBFKc4PZiiWD0IegTBxIQQ0oFRv5ExuKeVlxaKYKS8QFU15AYz8FBoPFTPCsXPgvs0SE950hPi/nNktC6WqJG0DnygpXjgzRyzOcKxEaTRYFCspOyWJCqQDf9O74BZwvlNUg6guoiACKaIYSsWcccs7MpJiqoBt74NXqCzuJs63T+9xcZeRsfo9YfYAMTZO/lOuxxktxjhOjAD58G31I4pfJ0ogK0GNRY1kMcPKp+39BK4TboTfxYth6f5Ujp0r5K4eDRx293o3CqZbdCD/XXGM/L8zOSNlJyx/AKL7iC+UynZui8XC8RwTdX2qXU9gQ+j5OER1g3MHJbktuKl0ad39/r5r+5uwxo4nFVxOclU4ZCuFHPj3/C4qXEEK02Q084dRkuS46U3H9yWvEBnduX8rPHFMxphi+kvctXW7RyD8dIcoTsvz5T5XlgcLJ0oC5dC3xNNm2X3iSadSyf2y411iSHzzp1jq97KdU6WW8amO94h3T0hz8eZz1owBiPtd1gBtJorKpOvD6LNOs8XYgoScUor1zhtnf4Ur+rtoNMi6y1whSV/OOHdAmmBD/k/G3Ne9jBeljGwTzv+NaUXbKH/6fX6I+4+3Ys/QFZTcuRnzHaukgLbkbinkRbanbMJPjPwhgRPp4qdjtsCG2EyeXHSIugEedqcM8dFxa/tIQn3cyC818PF6x0EXJrOFdccz6NMk+PxjXjotD/driLtL7UMJAZ46+jcNZc60jix/sDtvjmlJ41BvLBb4YUcyb648QWGZEX8PF+7q0YCu0YHEpReRmufAEFpBQUHhCnDVRqtWrVrFo48+yvTp02nUqNHfcsygoCA0Gg0ZGfbqhIyMDMLCwmp97vvvv8/bb7/N+vXrad26tdP9dDodOt11ONtcSXJOCa+uOEZeaQW3dqjDR+tO4qpR4+GioWM9f/Ym1/RNCfbSUVZhomGIF70aBaFSwfZT2Rw9W8jKI2k80DeGjXGZqFQwsXNd1FiwGGuJ5AQZLbp/G3iHcbbQwLurj/D74TTmjqtDN52PJCWZjdLtqtrxuuldGfEoL5RixLD3JUHKK0wKFXErpXAT3UdSm9SX+CMd1kqUNLmnZSEy4iPQF4uyRaWRx5qPrtmF0+rE8Hj189DtYVEJuXqL8mfYe5LQZbGIuubIUpn1j2gLU5bLCJVRL2kY3mFSyCrJkuMmbROJue8Se28eEEPj6D4S6VwdN1+Jb/1xsvxbXySdR5D3q99/YOMbtv13fQ03f1IzLvQa5nr/XbwYjCYzn22Ip3P9ABqHOhghBFQqFbd3q8dTiw/x0bqTvDrq0gziHVKULl4TQY2h+2N2RRyA1CILRQZo4Oug9u/iDo0Hy9c1TqindGUTC8x0qmWiwiG+dWr/IPcv4t/wu6hwBSlKh6V32zcltn8IB36AezbZzIiN5aLg8AqrfQTq3EHHaUoAO7+QxlDDwRDRXtYLLm7S9FhyF/hEkI83fiM+Bn2BpCfpC2W7ixtM+03Gp4zltSc/Wu/3ai1kxWGu24X3T4Yzc1USX0z253BqPvWDPHCpFvLxV7miv4vmiso11QXGUE0Vso4KbQlqFxj2Pu6ePnig5WxqATM2J1BiMLH1VC5bT+WiVsHUjiHc1mIsUY1GUGzW4e4XwjMrU0l3kBKVXlhOcm4pMcFeJGRJpPxdPRpQN0ACJvRGM8k5zosnKbmlNAnzxlunZUDzEB7t34i6gRdIXayCb7V0qqxiPV9vlTHbugEevDG6JV9uPsWXm+WxSD93XhvVgq7RgXjq/jEHCwUFhX8BV02Rs337doqKiujQoQNdunTh888/JzvbidLhInF1daVDhw52RsVW4+LaIs3fffddXn/9dVavXk3Hjh3/0jVcy5zLL2P81zvYGJfFgTP51A/w4L2xrflicnu0WjUvDm9GHX/7FAkfNy3vj2+Dv4cLI9tEsDsxlx0JOdzUMpwvJ7cnPqMIHzctB1LyWbb/LB+sjUOjVlPhGS7KEweY6/agwDWE3bluHEwt5K5vd/PLwXMYzRa+PVRKcZs7ZUffOrLgqt+rcjZdI/GV+UlinLz4dvmwueo5idg+ME8KFlpX+PNz+GGkRF46S4/yCpWCh0+EFEV6PS2pWSufke0RbeHnh8TM78RKkWd/01u6f0Pfka6cV6iMTE1dAYeXiMliTgLMGwOzB8L3w+HgAinULLlDYk1jf4Gjy8BsFtl4RDsxew5qKBHf1iKOld8el4JSlwfkPfGrK8WioW+DoQxunSMFrsCGUghqe5vM5699ybaYje4LBSnyfk76UWb4q46FFKSKP4DCNcXqY+mcyStjdLvaE/T8PVwZ3TaCebtSSKllAXtRmM2w9F7pYvd5zmFx72i2dJMdFnKuI1w1KoLdVSRejiLHpw6UZIoZuYKCwuWTccyxstTFE4rTJUzgi87wdW/4squMZ5fU0iwqrGXM1KiXYy6q9M1L+RN+f0r85ro9IuNWidswmVUSg23Uw6LJMir662PSEInuD3W6SHiDM5qOkDGk70fA2T2UGy3M3ikj/ltPZtM0zOeKqXKuGEVpUFFcmT7Y2fl+dTpB/FqYPw6+GwYLxqP59WGaexTQt0kIx9PsfXvMFvhuTyaDv0thWXoIOwoDyTR5sSkuy8kJYGtcFs3CvNGqVXw6sS2BXq7sT8lj5ZE0sov0
vDSsGZ6ujotkTcO8Gd02grVP9ubNW1oRHez1l5IfC8tE5Q7w4vBmPLX4EDtP557ffja/jLu/31vjdSsoKCj83Vy1VXnXrl2ZOXMmaWlp3H///fz4449ERERgNptZt24dRUW1RBPWwpNPPsnMmTP5/vvviY2NZfr06ZSUlHDnnVIcmDp1qp0Z8jvvvMPLL7/MnDlzqF+/Punp6aSnp1NcXPy3vM5riT2JuWQU2vxWWkT6sOVkFvf+sBeT2cLjiw7y1OAmvD+uNQ/0ieb1US2ZNa0jHq5q3l97krdXn+DAmXwOpRbw3po4Plkfz+ODGqNRq1h7TFRQx84VUmowsToZDBMXyUhSVfzrY7z5M15Ylcp9c/eRlFNKXIbtvV4fl0NC9BTME3+U+HB3fwhrCRMXwB1rpEP24232njD5KSKR7nI/nFwNR5aIkS+IueHQt2u+GWqNFEd8o6DnUyKpTvlTCkPlBRKVmR0P4+eJMfL2D6UDZzbBr49Kp7DDHTC+0hg5sDEM+h9s+0B8bayjamYjHF4ki5p2t9vOf2ihKHEcEdjQ/t+F52DBeFEJdX8Mxn4rho2peyAwWowUO98HkxZKMcpkkP2tY15BTUSt1PMpKQQtuVvm/KsS00/SNRSuKebuSKZZuDcNgi7cLRzSMgwfNy2fbnQsKb9ods0QJVjPp+z9C6pwLNtEgJsKP7erEHt+hQn3VHH6cpKrfK3JVcp4lYLCZWM2SxPGEYNelZCDqoEEhhJY97Lc652NhDvzuAHxtirLt91X85JkdLTL/VIAStiI/9pHUB2ej1ntIsqSbo/Ic88dEDP3UxskHlvjCi3H1TyHR6CMk574Ta53zyzUu79mbBsZ5wn0dOW3w+dQ1Ranfi3iGSypnIcXwU1vO26SdZ0uI+urn7NrDqmStuG6eCJB5Nd6CgugUaswmS0EeTlXFoX7ufNQv4Ysvr8rMcFejP1qB2Nn7OCLTafYlZiLl5uWhfd1pX6g/QiWTqumZ8Mgfj2URrivOx6uf10h4+YiBaOGIV6k5JaS5ST96q1VseSVKA0zBQWFK8dVb696enpy1113sX37do4cOcJTTz3F22+/TUhICCNHjrzk402YMIH333+fV155hbZt23Lw4EFWr1593gA5JSWFtDRbvORXX32FwWBg7NixhIeHn/96//33/7bXeK2wMU5SjUJ9dLwyvBl/JuTw22F5L/JLK0jOKeWJRQd5e9UJ/kzIYcaWBD5Ye5L4jGKOnavZSYjLKOJUZjG/H06zM5RLyCqmY/1A/rdLTfyY1VSMX0DFgNcpHL+M/QMW8sBvuXSq7897Y9sQW61DoVGrcFObUG/4rxRMDi2EnV/JgstQCFvfd7x4MxkkBaq6n8ya/0gR487V0GykSH1bjYW714thoUoFdTpC/hkxKKzbVSLLez4p+2nU8MdHNc+XcQxWPSvjTqufh5IMWbAlbXP85h9dan9ttcmTfSJE9VMVi1mKQYYiWSSN+FgWl9Z4VpUK/BqIubK+SCKgdd6i3Ll9mcS3BkbDvu9EJl4VNz/x2rnGjGj/7ZzKLGZXYi4DmzlWtlVHp9UwonUEy/ef40zuZapychNhw6tiEh7ufMT0SLaJ+j7XfxEHIMxLTcJfiSDP/ouFMwWFfzuORnp9IqSpUuQkDnzTGzKS5Qj/BhDayvG23k/D/u/kPtqgj/i+bH4L1rwoTZhK3zxN/BqMvnUxewZjCW0hfi/+9WWMev8cUbEeXgy9nhQVT/2eENZakv1u/1n+Lmx95/xp3Q5+y9RWbqhVkja44uA59MbrrJCTule8hrZ/CBmxcO9G6Hy/jKTHDICJC7F0uk8UTI7WaZmxuJeeo3Go45RMlQraRvmxLzkPsDC+o3Oj5hGtw3lu6WHUahV3fb+HjKJyXh3ZglFtI/l+RxLT5+3nlV+O8dKI5kzqLMep4+/OxxPb8sXmBAa3qN1u4VII9HSla3QAMcFeHD3rPDjiSGoB5RXXmQpLQUHhuuIfHd5s0qQJ7777Lm+99Ra//vorc+bMuazjPPzwwzz88MMOt23evNnu30lJSZd1juuRdlF+DGkRRn6pgbZRfjy0wNbl0le5uWQXG8gulq7BsFbhLN3vXKa8dH8qr41qSdu6/uxIyOGnvWcI8HLl4w3xbIvPZsl+NSsf7c1//wzk+LlC8kpFKbMxLpNb20fSoZ59x39ki0DqHv8GsuJqniz9sER5OiPzuBRnqhLeRvw96nUTY+GKMukieQbZ7xfSXBaNGhd7U9ask7UnOuUlgneomCMHRDvfz2yUL5Dr6TpdOnpW8s+IaXPyHxDcREalTq6Bja9V+ucESOFGrYWve0rhpe8L4O5rO4bWBUKbw5hZUqxRqcAjWEbNQHyJ7lgpo1rWCNa63WHEhzKupXBNsfxAKp46DZ3qB1z0cwY0C+GXg2eZue00r12OV87q56UA2G6q010sFguHMs0MrH9jzPpHeKnYlGLGZLZcmrzexV0ijbPjr9zFKSjc6KjVom49vMj+cd8ox+sAK4XnRCXrCO9QuO1HWP8qHFsm917vMBj4qqhSmwyDHk/A6c2yf8c7ZST63AFZZ/R/GXReuC6eLLHmWz+AgAai3PGNkpHTXx4SBc+B70XR2mW63EfP7hNlb36y/TUZ9XhSxqsjW7Jo7xlCfNzQXW/R1Gf3SUOs/VRI3gEZR2SUu+NdMrrt5i9rj9wEp4fQZ5zk2SFDuH/ePkxm+2LPvb2i+fXQOYa1CufXw2kMbh7GgZR8NlU2IQHUKnhtZEu2xWeBykJBmZGMQj13dK/P0XMF/LQ39fy+B8/kc8/3e/l0Yltu71oPrVqN0WxmaIsw6lVT6vwVfD1ceW9sGz5YG0ewt+OgFIBQH7e/NMKloKCgcCGuiZW5RqNh9OjRjB49+p++lBsGi8VCu7r+3D5nF4VlRlY91ovsIpv8c3dSLj0aBvJHlWSqizsubDyRyZebTtGvaQhfTm6Pv6cr83bKIuauHvWZtT3R4XGX7j/LXT0aoNOq0RulIz6hhQfuvy10fLKidPCrJyNGjvCvJ2kHVen3ki0y2RpX6gitK3gF13zcxU0+sFWUOX6eT6QoeVRqkVM7Q6WSBK3+L4nBokkv6Rk+4WKmOHuQvTeOxhWmLIWH90khKesEHJhrW3geXgQJG6UjVr0I4+YjXzVeow7qdhE/n/J8uWb3APBwPD6j8M9hNltYvv8sXRsE4qK5+MW+TqthUPMwFu85w+MDGxPg6XrhJ1k5tV7GFfo8Lz/zTkgptJCvtxDjd519CHFCuJeaCrMYONfzvcRFtm+k/G4qKChcPkGNZJz56FLbY8WZoqxxhkeA3Ced4VtHlKv9X5L7rauXGCTnp0DGLHuz/72zodnNUqDZ8YUka258Xe7vQY1lDLs4XTzx0g5K48fqw2OxwKmNkPQHjJkpjRJHqDUEBwSwZW8m62MzmTGlg11y6HVBo4EyEp52EOp2ljVZfoo0m/zqShEspJl8X5x47um96rD5ZAbf39mZuTuTOHq2kFAfN+7oUZ/m4T6UGYxkF5Xzw45kTqYX89Tgxtz
Vsz57k/LwdtPSo2EQOcV60gvUvDaqJUdSC1CpoE/jYO763nGS1uu/x/L80KY8t/Qwt3aow8g2EX/7WxMV4MELw5qRW2Jg9vbTmB0Ikh7oE0Owt2IKr6CgcOW4MVbmCjVILyznnh/2UlgmqhAXtYoO9W0f4OfvTOG+XtE0qZaMc/xcAeM61nF63KEtw9h2Mut8LOTrv8fi7qIhNa8MX3cXRrWLZOURJ9JoYH1sJh9PbHu+M6XT4LxocnSJKFkcoVJBm0mSWgUiyx77rXTi3C9e0VADNz9of6fjbV4houApy4N6PeXfIc0d79t4mCwiE7eJEfLCSTCzH8weIuNZLtW6QyaDJE5ZLLK4XXq3rYhjpSRLtpkvUarrFSwL58AYpYhzjXIwNZ9zBeV0bxh04Z2rMbhFKGYLzN+ZfOGdrZhNsOYlGT2s16P2a8uSn7cbpZATUZlclZB/GZJ337pKIUdB4a/iGSxedlOWS6BB/V4y3txwAOgcNCVA/OK8LzAeo/OUBk9QY1kTVJRBZqwYG1cn9ldJu2o9DnZ+KUWckZ+JwfHCCZX/nSjjVG5+NYvdRr2sBZwocyua3sKnO/PZcCKTqd3qERN88SlJ1wzBzcAzFE5vgQWV78ni26XoZTGBqQJLRiwVrW5z/HyfSNyCo4kJ9mZHQg7Tutfn00lt6d80GIvFwr3f7xXTYJWaB3rHsC42g1u/+pOfD5xlaIswmoV5sz85j/fXnuTjDfHM3pZIu7r+DGgaQmpemVPLpKwiPW4uGoxmC4v2nGF9bAb7k3MxO6q2/AVCfdyoH+TJV1M61FBbjWkXydAWoahqSzqrhfxSAwmZxfx++Bzb4rM4m1eG0XQZI8EKCgo3NNeEIkfh76eovIL/3NSU5NxSftqbilaj4vGBjdkWn43JbKFIb+SxRQd5enATgrxcKSw3Euylo6CsAg9XDS0ifGr45DQO9aJhiBcfrLV5RJzKLCazSM9nE9tRbjRzKrO41qTK5NwS9EYjn01qR16pARcPC5aorqjO7Ky5c0m2LMaGfQBr/yMLJ5BRqWHvSTz4HaskItNkgEOLoO/zlxepXZQhZsJ7ZolpYUEqnFhh2+5XF4Z/KIZ+A16tTMDyFlPmH2+zjS6BfDAe9p54jyRusT9PfjL8PF3GpH55yH6bvlBGt6o/pyqxv0KHO+3HwRSue1YfTcfX3YWmTiLHa8PHzYVejYL4fkcS9/eJwfVi5PtHlkBWrPxuXWCheSDDRKiHCm/XG0MiHuiuwl0Lp/LN9K93iU/2qwsnfhVVnYtzSb2CgsIF8AqBhv2hXlcpLOu8xQh52q/ikWcNOFCpZPSz7W0SWnAxFGdC2iHIOQ1xvzvf79ACGb8y6qHPs5JmZQ1OsJKyAza9KffdnV/ab9vyDoz6QvzzqoxcmmMGUNTrZZqe07CiUyOMZgtl16NXSlGahD9U9wLMT4Zl98LEBWSlpZDZZDpNizNxObXSto9/A/TjFzLpx+TzARdfbD5FHX933hvbGq1aTWJOCX8mZLM+NpOoAHeev6kp//d7LCsOnWNa9/qsOJTGor1nzh8yNa+Mtccz+Ob2DpRcIAFMq7HdrxbtOUOXBgFkFpUT5vv3hjwUlxs5mV7I57e1I7NQT4nBRMMQLwpKDVwwtt0JWUXlvLkyluUHzp1/zEun5ZvbO9CxgT+uf3OEvYKCwvWLUsi5wcgrMZCSW0pBWQUatYpO9QPo3zSE/FIDqfnlfDKxLZ9tOEVcRhEFZRWsO57BPT3r0yLCh1Ff/EFuSQXeOi1vjWlFal4Zq4+lY7FYGNwijAZBnjy/9EiNc/6ZkEOYjw690cyBlHyGtghncZWbb1X6Ng7m/1bGUl5hZnrfGJkvdnsbZg20ecpYieoinhTtJkOjQaJkUalFQu3iKQuuPTNlbKjVeOj3ghR+LpWiDPj5QUhYL/9O3i7+NLf/LF0njU7OkbBRpNQBMVLEAZmjn/qzLBxLsqVj6BkM+mJRFDmiIFUUOS4etrhwK/qi2ke23Hwvr1ClcM1isVhYeSSNjvX8UV/mPP3QlmFsOJHJ70fOcUs754o6AExGMfuM6ir+TBdgb7qRxgE3hhoHQKVSEemlJj7vMrqbfnXFiDz7ZK3m0AoKChdJVXWqWi0+d/duhuI0uR/6RonHnZuv00PYUZIloQdHfoKeT9Q0+69KeSEWjSsqjasoeasXcayc+BXLXWtQVS/kFJ6jKO0UKQPnUc+tFK0+D5NnGIUaPwrxZv7uo7w0vBm7EnPo37SWUe9rFVdPiP3F8baCVCxl+SSo63Pn/ESe7v00Qzo9h58xC1fvQAo0AdyxOMUupRSkGPPJhnheHNYMNxc1STmlhPm6cTAln/t7x/D9nZ0wmiy4atX4uLugVaswVlHS6I1mPlh3kndubY2Hq4ZSQ82CTus6vpzMsCXhlhpMmMxQYXKuyCkoFa/I+MxifNy01Av0JNRHh/YCo84HUvL4YJ0U8YK9dOhc1KQXlGM0W/j2jk70u8Tvu9ls4ecD5+yKOADFeiN3fLuHdU/2pl7gdajuUlBQuCIohZwbiFKDkTN5pbz48xGOnrUtXno2DOL10S15ZskR9EYzU7rWIyrAHbVKxe7EXN5aFcdtXeqSW1IBQJHeyMMLD9A83IfejYMY1CyUt1ad4L3kPIfnDfbW8fXW04zrUIfCMgMTOkWxLT6LtAJ7Y8JhrcJoVceXpdO7E+LtZlMOBDcT75e1r0DSVlmwdb5fDBG9KxN8/OvJV1U8A8V/BnNlKtRlkn7YVsQBMFVISsP2D2Hg69DtQTFZbDTEZiRcFa9Q+apKeaGkaGQedzw7Xpwhr7N6ISe4iRgLxq91fK3dHnYcAapw3XIyo5jUvDImd7lUeYiNOv4etI70Zc72JEa3jaxdzn10iXxg6fH4BY9bbLBwPMfMXa2uM2+HCxDupeJk7mV0yK3+VFknlEKOgsKVQKUC3wj5uhxyT0sRB8TMuH4v+zjzKuhjhlKiCSSgQZ/aQw4sZowqF8pvnon3n+/KOiAwhuzoW9hc3pBgVQCPLEkjvbCCsookAG5uHcHUbvXILTHQto4/GpWK3BI9AZ7XkWeKSmU/yq1Syzi5xkX+BpZk8cpGC3qjmf/bmM7/Ac8NbUJmQjk3tfImNq3I4WF3ns6lxGDiyUGN6VQ/gPIKE77uLjy04ACJ2SXn9xvaMowPxrfhycWH7IySD6cWUGow8vGEtjw4f79docffw4UnBzXm2SWHzz+m06qJCnCnqLyCY2cL8Pd0JdRbh6aySJNVpOetlbEsO2AL+vDWaZk5rSPt6/k5VcAUllUwa7ut+Fc9hnzmttN0ahCAl+7i16eZRXpmbHFsHm0wmdl6MovbuylrQAUFBUEp5NxAZBaW88KyIzVGorafyub/fo9lUPNQvtycwJsrY+22Nw/3IafE/gYEcDytkONphaTkltIy0pe9Dgo5Hq4aIv3cSM0r47s/k3hpRHOeW3qY10eLKd22+CzcXTXc3aMBrer4OTZ+c3GTLtz478V0UKWWueyLkY+q1fwlqyejHvbMdr593xxoM8FWUL
oQZflSpIlfBw0HyqjXmd0SaV51oNu3jpgmV6XDnaLmcfOFFrfCsaX229vdLu9TdYozJEY6+U8pKNXrIcogZfTjumB9bAZuLmqahzvxhrhIhrQM4701cexPyaNDPSc+UWYzbPtA1G6BMRc85sFME2YLNLmBFDkAdbzVrMiowGKxXJqHgaunqASrjlIqKChcOxyYb/v/xC3S/Di0QBSzVfEIIKPhWEryivBrNxW1Zy3eelod5Rpv5mQ04u5bfsAtdRuqihJM3pF0DPFj8oIjnM23b1ytOHQOHzctw1qF88bvx/n8tnaYalGEXJNo3WzK4XZToPloSbIylkPvpzH7N+RcXtL53V00KpqE+bA+NpP+zWpfM5UZTIT5uDHxm528NaYVTyw6RHqh/Xu4+mg6QV46vp7SnqxiA4dT8/nl4DlKDSayigz8uCeFWdM6cvBMPqezS2gR7kN0sBev/xZLZpVwj3Ed6/DroXPM3CZFF193F/43sgUDm4bgqdPy84GzdkUckIbmtDm7WfdEH+o6Sbwyms0UlVc4fY1F5cZL9rUxmc3klDg2jgY4XaXQpaCgoKAUcm4gCsqMNYo4VjacyOCunvX5cnPNSn9STgmdnH3wA+LSi3iwb0Pi0ovYcdpWfPDWaXl3XGu+3CTHzCutwE2rIa2gnHu+30uHev7c0aM+fRsH4+N+ER19d7+r7/1iscj4lNPtJuAiF1+lufDnZ6LkqUqbiTDodVj7kvw7oh341YfAhvKB0Dscej0liyTr6x/2LnR/CI4ul8JWyzHgF1Vz7KrwHCy6Hc7utT2m1sKEuRDdXynmXAdsiM2gVaTvxXnb1ELbKD/Cfd2Ysz3JeSEn7ncZC7rpvYs65q40I96uEtl9IxHppaKkAtJKLJf+2vzrQfrRK3NhCgoKf42q93OLRfxrxsyE/XPF3wrQNx5FTqcncVdbqLtihDRVBr0OdTqJV1419O3uRq/x5IGAzehmP32+KRMKmBoN5Z0hrzBlUVKN5y3Zn8ptXepy9FwhJQYTEX5/XwT21cCYfw6XLg+AoUhGz+aPtduubjaKz0Y+w91LU3B30fDWmFbM2Z5I38bBeOqcN+J83LX4erjw0fqT6FzUqFSqGkUcK0v2naFP4yDeWhVL95ggZkzpwNrj6Rw8k8eu07l0jwlkeKtwFuxKpmM9f55depiELFuxY0iLUKZ0rceIT7eff6ygrIInFh3kx/u6Uj/Qw6kCRm80sy0+i8mBjtWyPm4uDGoW6lR5NLRFKN5ulzYKr9NqaBjiJX6TDujS4C+EeSgoKNxwKIWcG4i8UudVfItF5KXV541BuhOhvm50qufPHgeqm+eGNgWLhbfGtCK/zMCu07n4e7ripdMyY3MCh1JFkuzuosFSRXWSkltK2yj/iyvi/FO4uEH7ac5HmVpNAI+LTBLKjK1ZxAE49CPc/KlEqYc0heEfyNz/1BUSk6rWipKmqjLAM0i+Ijs4P59RD398Yl/EAfEaWnQ7PLxXPHwUrlkKSis4eCafu3s6Tj65FNQqFUNahDF3RzJn88uI9HNg6vjHJ5JUFdLsoo65PdVI80AN6stM3rhWifKRollcrokIr0ssoPnXr2n+qaCgcG3QZhIcmGf7d+5pCSRoeSvmyctIsYTw3cEieuWYGbBjqk0Zu/VduHUW7J0j6wGLBbQ6zB3vobzDg5jy09CtfqrG6TTxq2kR2Zt2UW04cMZ+PKu8wkx+mSg2Cssq/nKx/mqj9q8LhnxZn3x/c43tqthf6N5gADOmDEalUjF7eyKnMot5oE8MZrOFQc1DWXc8o8bz7u8dgwoZkWoQ5ElavpPkUuQ9rDBZKCwzsvpoOpvjMlk6vTuZheWMahvJ0n1ncHNR0yTMh4cWHGB63xgi/NwpNRjxcXNB56LmrZUnaqx7Ad5dc4KPxre9bAWMVqNmXMco5u5MJq/UXpkT6OnKqLaRaC7R9y7IW8d/hjXjru9qFhRDfXS0ruN3ScdTUFC4sbm+7ioKTskoLCfA03nBRK0CrVrNl5Pb06aOmAZq1Cpu71qXWdM6UmE08cSgxkztVg9PVw0atYrGoV58NKEt+5LzOHimgGeXHkKFithzBby1MpYH5+8/X8QBmNApiti0AlpF+PJAnxh+vK8r4T7XgSIksgNEdqr5uG8d6DANNBdR7zSUwo7PnW8/tFAKN2NmShEHJBbct46MQV3OB+WSTNj/g+NtZmPt6VcK1wTbT2VjtnD+d/Kv0qdxMG6uar7/M6nmxpRd0m1ucctFHavIYOFwlpmWQTfebSKoMrkqLvcyDI/9G0BRuijwFBQUri2CGkPDQfaPVZRB3Cr0nhEM+TaR7/Zk0jbAIEUeK/oi+OlOiROfuADLPRswP7ib7M7PkmrwJjjBSXgB4H9wBne3Fd+SFhE+DG0ZRvu6fnjrtGRXjvj4e1zDDS0nqMrz4fgvEsHuBPfdn2MozODB+fvxdXdh7t2defmXo5zOLmVSpyge7BuDd6VHTISvG6+PasFNLcOYsSWBSD93sor0RAV42L1vVfHSae38ccorzLy3Jo6t8dkcO1fA/F1n2HU6l+GtwvlsUjvWHkvnlV+Osmx/Kt5uWuIzithyMsvhtZ9ML8ZisdQaDd+pfu0KmKgAD5Y/2INRbSNw0ahw1agZ0y6SZQ92p07A5SmwOtbz55OJbQnysv3MdI0O4Mf7uhHhqEGjoKDwr0VR5NwglOiN5JUa6Fzfn91JNVU1N7eJYM3RNJYfPMekznV5dEAjwn3dOJxawNdbThPqo2NQ81BGtonglnaRlBpMeOm0bIrLZOa203w2qT27E/OYNHMnS6d3Z3dSHnnYOhD39mrAre3rkF9WQbu6AXjqtHyzNYG7e0TTOMyb3BI92cUGMgvLCfDSEeylc+yX80/gEy6jSPHrYO8sUbq0ngCtxsk408VgMkhhxRmlOeKv4eZrG+f6KwbNIOlD1c2Sq1KY/teOr3DF2XIykzr+7gR6/T2/C24uGvo3CWHBrhQe6d/QXta980spHNZxULR0wK5zRkwWaBl840WdqlUqorzVnMi5jEKOVeWWfgSi+/y9F6agoPDX8AqBUZ9D4lbY9ZUkSDYbBe1vp8w1nJFtSsguNuDt6QnhbSHtoO25FaWw8yvY+RUFk9cQq/eiSF9OsxAvVIWpzs9ZmkujYHdmT+vI8bRC4jOK6d4wiJdGNGftsXQGNgvBRXP9qRqNZYW46rwvuLbpUteHTyeGsicph3P5ZSRml3DsbCH9mwUztVs9RrWNwFypCvd01ZBdYmBa9/qYzRDmq8NktjC8VTgn0ovo3jCIR/o3YsaWBHYl5nJ717qsO2a/ltkWn83Y9nVwc9VQbjQxb1cKA5uH0qlBAF9N6UBZhQkPVw3ebi5si892cuEQFeDO3qQ87usdw3NLD9fYHuKto/VFNFnqB3ny1i2teP6mpmABP08X3F0uf33n4+7Cza0j6NwgoFLJpcHfwwW/67AYqKCgcGVRCjk3CDL6oOI/w5rx8fp4Nld2INQqSU94qG9DJs3cSU6JgRlbEpgxpQP3/rCPs1UkrTO3J
fLKiObsTc5l5ZF01CoY1TaS98baDHZLDSbm70zmnVtbUaQ3UVBWQYd6/pzNK2P8NzsoLJMIca1axR096hOXUYSnTsNTPx1i52lbB7tJqDczp3ag7rUSo+gTDh2mQtPhEi/sEQDqC3yArSiT2PG8RND5QsxASN3reN/oflK4OXcQ9n0nEaktboG63cA38vKu2dUTgptKeoQjGvS6vOMqXBUsFgtbT2bX6ED+VYa2DGfV0XQW7k7hvt6Vhsb5ZyD2V0mDU12cwmZDipFwTxVhnjeeIgdkvCr2cpKrvCNAq5O0O6WQo6Bw7eEdBq3HS+CA2QTu/qDREqAv5p1eWlQHf0S1ORma3SyBBGtfgpxTtudrXCl38WPGxtMk5ZTw9e3tMTUejva44yhuU1Q3vH39efzLgxTpjecfn7n1NF/c1p6BTUPRatQYjCZctddPYdzkWxd8IiUd9OQax/vU7UlyiZaHFuxnSte65wsnXm5aPloXz9Su9fD3dOHI2UKGtwzj6LlC/vfLMRoEe9I03IfejYKYPm+/3fum06r5cHwbejUKolm4D22iLPi4uzBvV8r57S5aNck5JVgsYDJbzjsZ+ri74ONua2CMbhfBl5tPOYwef6BPDB+tO8mAZqG8PKIZX2xKILdyzGpA02D+e3OLi1bAeOi0eFxCOtWFUKtVhPu6E+6rKHAUFBScc2Ou0P+FBHi6EODhQmJOKU3DvZk1rSNfTm7PN1M74u/pSkJ2MeM71gFgTLtI5u9KtiviWHnj9+OM6yAqFLMFlh84S0JWMWkFtn33peSxOymPx388yDurYynVG7lv7t7zRRwAo9nCrG2JGIxmZmw+bVfEAYjLKOLeH/aRVVQzLesfxTNQRp4uVMQpL5SI0y86wQ+jYFY/iOoMbn4193X1hM73wp5Z8E0f2PctnPgNlt4N3w2HnAT5oK0vhYKzkkBVmHbha/UKgaHvON4W2uqiUokU/jkSskpILyyn1d80VmUlwNOVHg2D+GZrIuUVlYWKPbPAxR1i+l3UMSwWCxuSjbQLvX4+dFwqdX1UJOSbMVxqkoxaA/7RUpRVULiRMRmhIFXuSUUXcU+60uiLID8F8pIlIfJCeATI/VyjlcbLid9Qf90D1c4v4cTvsPF1+OUhuOkduyCBkg4PUKgNoHmED8k5pSRmlVAU2kkUjdVRaynv/SLTf4q3K0aAmOU+u/Qwbq5q9BVGCsqcJxxdi1hcfbF4hYFPhBTHqqNxIaP94xzNMvJQ32iGt4ogOacEtUrGgw6l5rMlPosf95zhz1PZVFgs/LT3DK/f0pLGod60jfLjlV+OOXzfXv7lGO3q+nPPD3t5YN4+6vh7MKi5JGGNbhtJhI8bB8/kM7h5KFO71cPP3bGpcKSfO3Pu6ISPu63IolWreGpQY7x1WkoNJmZvT2RLXBbf3dmJXx/uwZrHezGuYxS7EnOJTSus1X9SQUFB4Z9EUeTcAOQW6yksq8DdVcN/lh2hrMIEW07b7bPqaBoL7unKvJ0pIl1deMDhscwWiE0rJCbYi4Qscc2ftzOZ10e3PL9PoKeOonIjBpOZ9mF+rI/NcNjtAPhm62lGtY1wuC0uo4isYv21M2J1KeScghWP2P5tscDaF2HsbNjxFZzeII/V7wk3vSv7bHqj5nHyEmHrexLtuWeWFHnKC8CvLgz4L8T0l8WoM+p0hCnLYfVzkkak1UHrSdDnWccLL4Vrhu3xWbhoVDQN+2ux444Y1SaCbfFZ/LT3DLd3DIP930PMACnmXASHs8xkllpofwMXcur7qDGaIT7PTIugS3ydgTFwzvHfUAWFG4KiDFGP7vwSyvOliDHgv6Jyqe2edKXIPiXKmfg1opqt30sKMEFNLs7HrjhDijaWamuV0hzY/pGEHhz4gYJOj3PUfxA+uNK3cTA9GwYR5KXj8ZVn+Gjccrz/eBOXuBWi9IloR3rP10kzhXModZ/D0+aWGNAbzYD6ko1v/2nczEWoVj8LPnVg1JewZ6YocyxmLJEd0A96mxfWlXAyO493x7bmj1NZlOhN/G9kCxbvPYPFAkFeOo6nFVKkN3LsbAH39o4mv7SCFpG+RPi6OTUTzi0xUFhegQrJDf1o/Uk+v609pzKLuadXA05lFuPhqkWrNhPq40ZWsZ4Q75qejK5aDd2iA1n1WG8yCsoprzAR7K1jf0oevx5K4+khTbBYwM/Dhfm7kukRE8Tzy45QarCpNYe0COWN0a2uz7WqgoLCDY1SyLnOSc0t4eCZAjKLy2kS6iNFHMDHTYu/pytZRXpKDSYyCvUUlFcwa1oHXLQaO/O46hTrjbi52MRaheVGtGrbv29pF8k7q2WcJ8Tb7XzUo0atolWkLy4aFSfSiijSGzmTV0qIj/ObX27xNabIcYTJKN3I7HgozRZj5O0f1dwvMxaW3gMD/gfD35MRFjc/iRTf9Jbz4x9bLjLwPz62PZafIoqdER9Bu2mgcfJBU+cFDfvDHb+DoRjULuAZrMSOXwdsP5VNoxBv3Fz+/mJJuJ873WOC+GzjKca57MCtLA+aDLvo5/+aUIGvDpoG3Liizbo+alTAsWzTZRRyGoqqrrwQ3P7+QpyCwj9KWR6s+Q8crWLwW5AKy+6FYe9DhzsdF08MJTJunH5Eii1hrUU5qvP6a9eTnwJzBtkbjCdtg1kD4YFt8vt4IVL3SQiAI5K2Yxn6DrFR4zlV5k2otztTZu+ioMyIh6uGmVM7cii1gD9yoiiv+zwtWz2NyWzkcJaFL1bk8eJw+/WUp6uGIG8deSUGCsuN6CvMbDqRyeMDG/+FN+Hqozp3QApW+cnw0zTxDhz3nWzLigN3f7YknARkvH90u0gGNgtlzbEM9iXnoVJBj4ZBfPdnEkNbhNImyo8Xlh1hT6WP45eT29d6fn2FGa1ajcFkriyGWfhqcjvm70rm98PplFYYKSwzsvzAWTrXD+Cz29oR6iBgQ6tRE+nnjoerhllbT/PFZlvc+PKDZ5nQqQ71Az0Z0iKcB+buw2Cy905bcyyDlpG+PNgnBo3mxr0nKigoXH8ohZzrmPIKI0fOFfLwwgPMmNIei8VCVIA7TwxsjFqlIr2gnDoB7hSVG3l/TRwVRjO5pRWYzQaahnlzIr3I4XHbRPnx7R9J5//t7+FCuVEKRFO61CWtoJzMIj0qFQxqHkpSTgm+7i4MaRHGnqRcyitMTOten8xCPauPppGa5zxaMuRaT7UyGSXpZ8E4kXUD9Hoa8pIc71+WB3u+kdl7T5tU+/xzHZ5D73z7hteg0WDHku6qeIUAIbXvo3DNYDSZ2Xk6l5taXjnV1Jj2kTz90yG+33SQ+yPaXbQXk8ls4ddTFXQJ1153HeRLwU2rItJbxdFsE+Mv9cnWD45phxQvKoUbj5Is+yJOVTa+AY2H1gwCKC+AQ4tgzfPy4R+kmdH3P9Dp7stX8ZjNcHSZ45S4ilLY8SUMfRO0F1hLVDiPkQagJBuzV1tiT6fx6I+Hzj9cajARn1HEF7e1IzG7lNPZJgwqb/w8XPlqTyxn88swWyx467ToXNQ8NbgJ3jotqXllhPm6YbZY
qOPvzsLdZ5jStR5h15PnSXmVOHV9kaiG98w6/5Brk+GAmAabzRZSc8uoMJsJ9XXj3VtbExngzqrD5zCZLXSsF8Brvx4/X8QBEUd567Q1RqtAxp983F3siioalYqyCjM9GgZTx98DbzctnjotC3edYVDzUE5lFrPlZBYh3joi/NzxdXPB39PlvC9RSk6pXRHHSkywNzsSJEGyehHHyqxtiYztUOeyPGuKyyvILjGQW2zA3VVDoJerQ/WQgoKCwqWiFHKuY9IL9Ly7Og6NWkWAuwv+Xjr+b3Qrnl1ymPTC8vP7xQR78snEtuiNJqbP20/dAA+eG9qURxbup7owp3ejIBKzSs4re0AM4TQqWPFwD8orTCzdd5Z7ezWge0wQyw+c5c4e9dkUl8md3+05/5w5fyTRLTqQ/xvTiiV7Hac9/D975x0fVbW14edMn/ReSSGE3nuXIooFFRGwo9jF3vV6bffqp157xw6CBRQrICKI9N57CyEJIb0n02e+PxYhhCQISgvs5/eLkjll9pnMPmfvd6/1rl4pYbXKK56WlO2DyZdLfn01xXsguo2YndZHXFcwH2bi3HoYLHuv/v1TBolYVB+2YhlM/ZWQo2hUbNhXSoXDTbv44+uPcyixwVYGN7Xyblo3RnWM4GinUQuy3ORW+ejf5MxNq6omKUjHxvy/YXgcnCBpavtWKSFHceZRsKvhbfaSAxP8w4Scgl3w6yO1X/N5JaU4ocffNwZ3VsCOXxvenjZP2hPwFxPj+G4NbwtPxamzUuX08MFhaemtYgKJD7Vy6xera6XbRAaaeW1URx6aup4pKzN58LwWpET685/pWw5GKQPEBluYMLY7Yf5GGo6DPk2Jat3wtogWaEYLTUKt3NIvBatJz11frSG3rCbKukmolY/HdGPhrkKSIvxYML12BakpKzO4a1AqL82qW7Dhhj7JzNiQffB3k15HfKiVudvysLs8lNvdzNy4H6/Xx6Sbe/LA1HVs3V+zIBYRYOLVUR1xe3z0SpFKql+tyKj3Uox6DavJQE6pvd7tAKU2F+5j9VMD8ssdvDV3B18tzzg43k6J8OfD67vSPDrwmM+nUCgUh6JiBBspBRUOHG4Pzw9vw0939eW3rbkAPPXTploiDoip6ltzdrKnUEpVZxRV8dO6fXx4fVd6pYRh0uuIDjLz+AWtuKFPMq/9vh2QygBj+ybT4sDDpszmxuHycNegFLZkl3PLF6v4bXMOZTYXb8+tO/BbmlbI/O353H5OCld3T8BwYHVfInmieGN0J8L8T/Oc48zltUUckJSK9qNAX4+5ns4Ave+quzoYngpN6xnIGiyyYrl2csNtMJzmn5HimFmyqwCrUU+zyH+YcvAXjLSsxIuOl7LaH/UxkzY7aRqs0SzkzH88NA3WsbXQ26DHV4Po9BDeouEqdQpFY8YacuTthz/fXDZY+m7D+y96XdIQ/w56I/gfIdrULwz0R7EgFBiLr9O1dV/XdDDwCfZ5w9hfz0T+9gHNeHxabc8UkAn6q7O3c0OfZHbnSbnxd/7YVUvEAdhfauf2Sat54fL2hPjVb8h72mIvg3ZX1H1d08HAx3FrRl6+ogMtYwJ4bNqGWiIOQFaxjYe/Xc9LIzrgb6q7brxgZwGFlU5eHdWBNrFBGPUazSL9efbStkQGmJm2Zt/BfR+9oCUA2cU2Zm/OZXtOOY8MbcnLV3TkpV+31RJxAAoqnDw2bQOlNhdpBVLdqiGz6RV7imgSaqV1bMNpss0i/WtZDhwNLo+Xr1dkMHlZRq1F07SCSq75ZDn76yk4AmB3ecgsqmJ7ThmZRVU1RQsUCoXiMFRETiPD5faSXlhJemEVhRUO2sQFcdVHy6h0uhnYMoq9B8Saw1m5t5jbB9ZUMZq3PY8uiaE8cWFr9JpUmXr3DxFj3r+2K063F5NBY8aGHO6YvJoZ9/bD54W8cjt7Cqp48PzmzNkSTEZRJbM25zTY3s8Xp3NpxzieGtaGOwY2o9zuxt9sICLARKClEQxq6kuhcjsknPvyj2DOs5I/DmJQfNn7EJpc95iAKBjxkfjhLB8vFTea9oeuN4LjCAPc5P7gF/GPL0NxerF4VyGtYwNPbOqSx0Hw3tlcGWXmsx3NGd7CTZ/4I9/ytxd5+CPDw20dTWjamZtWVU1qqA67B7YXe2l3rD45kS0gbb7kB5wFn5XiLCIkUQSS+tKZkvrWqvAEyDOxtP5oBwDKssFtB/6Gn5TRCr3Gwdaf69/e5z4pL/4X+Kwh5HZ7mKgmPdAtfx/Kc6Ssdu+7cAQmcdc32Tx4fqtax5j0Okx6HYWV9Vct2pBVyn8va0d4gImcUhur9hbXu196YRVmgx6rsZENuUMSpGBDXGdY84X4H8V3kSqcFYXkVGn4fD7K7d46AlY1m7PLKLO7cHo8aFpdr+mPF6aRGObHpzdIxJTH68Nk0PHh/N0Ho6FGdU0gJcKfy99fTOUhgtqS3YVMua0X83fm1/veuWUOzAYdny5M48URHRjeKZ5Zm+qOV3/fksvU23uTU2onPsRab0XXJy5qTeQxpkPllzv4eEFag9t25JUTe1h587wyO+/N28U3KzNxuL2YDTqu6p7AXYNST38rAoVCcdJpZE8Vxd6iSm6ftJp9JTb+fXFrfli7D4/Xh9mg+8tS3k635P7qNHhjdCemrdnHS7O20SY2iEGtopizLY852/LqPdbm8rJiTxFvzd1xsMx4/9Rwnr6kLW/M2dHge5ZUOfH4fPiZDSSZG+HXrUn3+l/fPVf+f8MvYvCITwa3R6oUFRgDPe+AtiMkt3/LjzB1jETrXPoOfH+bvF5NSKK8/lero4pGhd3lYfXeYq7snvDXO/8T9i4FZyXn9ohnmUfHQ/NszBzpT6il4VXFN1Y5iPLTzoq0KpCIHL0G6/I8xy7kRLSCjd+KEWto0olpoEJxKgiMg2umwheXHXi+HSC4CVz2LvgdJpyY/CGxb8MRak16gPkfpJFEtoR+D8Gi12q/3ulaSOp9VKfw+eCFBSX4mTrx8KWT8de5sXt1LMhwMndlBf+6uA0Ol5eUCP+DlZTMBh2VzgYMkg+QXWLjie83/qVxb5ndRXGlk1D/0zyd/BC8bhd6TQcbpkhlTWuoFHVY+Rm+859n7oZ8WjRNxPEXESOVDjcbs8o4v00Mv9Wz8JcUZmVHbjl3fSWVAFtEBzD55p7YXR4mL9+L1ajjmZ831RJxqimocNYRhw6luMrF/lI7NpeHTgnBtIgOYEduRa19TAYd4QFGooLMvHtNZ16bvYPFuwvw+SSF7t8Xt6ZH8rF7PNldnnr9f6rZnVfJgEP8r8vtLl76dRvfr62JRHK4vUxcupcKh5vnLm1HgKURjqMVCsUJQ90RGhH5ZXbu+WotF7SLoXNiCOszS9GAN6/qxLacckL9Gh4gmPS6g6lN57WJZuXeYuZtF9Ems6iKVjEND7Ligi3YnW7+O31LrdcX7irkP79s5oquCczcWH9UTt/UCALNjSDypiEiW0FYChTVs6rS5fpjn8BpGgRGy787XSurm2l/iqny7QukpHFRmpQVj2oDQfWXblc
0XlbvLcbp8dI27gRXO9o5G8KaoguI4M5OXv610M7dc2x8fqEfJn3dCJJ5GS5m7XEzrrPp4L3iTMek10gO0rE218N1bY7x4KgDq/cZy5SQoziz0OkkWuXOpZC1Agp3Q3xXiGoLwfU8k/RG6HqDGP0fnoqsN0LfeyWy5u/iFybn6DAadv0uz8vmQyAo/qhNlHU6jRFdmzD285VMqUdvMhv09G0ezutXdmLc5NVkl9opd7iJDDDXG0kCEGA2kBjmx+MXtsKo1zDqtQbTNCMDzBQ2MiHHbQlD73HB8A/Ei6goHVpcCK0vIS8/j8/WeHi3lQENWSCsrxiqUa8R6mfi44V7+L8R7dBp8NvmHLwHAhnPbRXFdb2SuHPymoPH7MitYEduOf2aRzK2b1NyS+0sTasnOuwAfiZ9ndS3amJDLMSHWvEz67EazUwY24PJy/by1YoMbE4P57aO4sHzWhIf4odBryMq0ML/RnbA5vTg8fkIshiIDrL8rQhVi1FPsNXYYEpX8+jaqdWFFU5+WLev3n1/WLuPewY3V0KOQqGohbojNCLyKxxc2imOtPxKbv1i9cHXP1uczkXtY+iaGMJ5baL5fUtunWOv65XI6gNhvxe3j+WJ7zce3FbucFNud9EmNogt++um+Tw8tCXvzqvf/HDhrkL+PawNCWFWMotqD+CMeo37hjTyB09QLFz/I8x8WAaQPp+U9z7vP5Ay8J+dOyBKfhJ61LwW0fyfnVNx2rN4VwHBViMJYX4n7k0qcmH/Bmgv/gYRfjru62rm5eUO7plj461zrVgMNQPTzDIvD82z0zFSR7/4syMap5pmoTpW5Rx51b1eLMESNZexBDpeefwbplCcSnR6ESiPVqQMSYKxs+CnuyB3k7wW0QIufRdCm/7z9lhD5Ceq1V/t2SBtYoPolBDCusySWq8HWQzcMTAFu9NDmd3FpFt6siOnnMJKJ4EWAyO7NOHb1XWLNtx6TgqvzN6Ohka7+CCu7pHIF0v31tnv/DbRuDxe8iscpEadWF+040mxMZrQxHMw/vowuoBwiaqa+yyFXe7h/bRYzmsThN3lwWrUc0nHOH5al13nHKO7JVBY6aS4ysl9X6/jmp6JzLy3P1VOD0VVTpanFXHH5NXYXbWrRa1ML6Zf80hAom4a4uf1+7i5X1Pe+aPuGLV3s3C27y/ntnOaHUxriwux8sB5LRjTOxmfz0eQ1Yj/IdHiJoOOuJC/JzqKIbKXIKsBo15PVKCZ289J4X+/ba+zb0yQpc53ocTWcHSR1ycR7uBf/w4KheKspBHPsM8eiiqdbN5XitmgIyrIwsuz6j4UZm7MoX9qBKO7NSHM38SPa/fhcHvxN+m5umciLaMDaRrhj9mox2LU1wlRfenXbbx1VWemrcli1qYc3F4fMUEW7j+vOYlhfszfUVDnPatZsruQ/43swNfLM5m1KQenx0v35FBuO6cZO3PLaR0ThK4xr/CHJsEVn0JVgXgBmIMgMFZWLRWKY2TRrgLaxAahO5G+KrvnyUp4dNuDL7WN0HN/NzNvr3Zw6feVPNLDTNsIPWtyPTyz2I5JD+M6m88Kb5xDaRWmY3a6m7wqL1F+x9ino9rA3iUnpmEKRWNCb4C4TrLwYS+WRQ9rqCxWnCZEB1kYf11Xpm/IZtKyvVQ5PZzfOppbz0khMcwPnU5j8rK9hPqbaRkTwKSlGbz061b+e1k7kiP8+WzRHgornTQJtXJzv6YUVzn5c7v4swxuFck1PRLxN+n5YuleKp0ezAYdl3eO55KOcXy3KotRJzqd9jgT4mdk6hYzQy/+GIO9mJKKKvbGGpiw3kHvlHB0Oo2bJ6xi2rg+XNguhvAAE1NWZFLp9BBkMXBdryQu6xjH1R8vA6S094Ql6VzQLoacUjv//nETFQ2kHlUvdNickorcOyWcpWmFdfb7bXMuj1/YigCzgffm7aLM7sak13Fxh1iGdYhFp0FSeO1FE6NeR0zw8fObKahwsGZvMR8uSKPU5mJwqyiu65lIk1A/RndPoKjKyYTF6bgPhCy1ignk/Wu71Cll7lePIXSt7Y3RnkChUJxQ1F3hNKfM5uLjBbv5YH4av93fn/f/3N3gvtPW7KN7cig2p4dJN/XA32IgvaCSn9Zl88nCPWgaXN8zkeQIP6KDzLUqDJTZ3dwxeTWXdYrnh3F90DSNiAATAWYD+eUO/E11xZ9qAswG/vX9JrokhvD6lR3RaRpb95fxr+83YtBr9GkW0fhN2ixB8qNQ/ANKbS427Svlpn7HYYW6QXzi4RTdtk51mS7Rep7rZ+GzDU5u/a0mgq5TlI47OpkJMp9dIg5Ay3ARb1bleLgo5RiFnOh2sGOWmICeRhNWheKUERApP6cpMcEWbu7XlMs6xeHxQqi/EbOhJgpxaNsYcsvsvDBjO6lRAVQ4PDw6bQPT7+lHm7ggbE4PhRVOvlmZwebsmgjmL5dn0KtZOJd1imdQq2iKKp14fT4W7MjHB6xIL+Kuwamn4Ir/PnanF52msd9h4T8zihnZtQnBViND27r4dlUmaw9ENmUWVZFVZOPCtrFc2S0Bl8eHXqexPrOEkeOX1vKJ6Z0STqifkU8WpnFF1yZMXJJe533NBh09mkrKnMmg44e1+3hxRHs27iutI/zc0q8pYf4mbu7XlGEdYg8KORajGFUHWY3kVzjYU1CJxaAnzN90XNPbiiqd/N+MrbV8bXblVfDNigy+H9eX1KgAHjyvBdf3SqKkyoXVqCcswEREQN1qpBH+ZtrFB7FpX93I+HbxQYQ3orQ8hUJxclBCzmlOfoWDD+aLP0tGUVWDubYAJTYXTSMCSI0KZGtOOW3jgnB7fCzdLasYPh9MXp5BXIiVuwel8tRPm2sd73B7Wb6nkJ4pYYT7m2gXHwzIg/T63kmMn1/XJybAbKBJqJU9BZXsKaisVS6yervnSE50CsVZxLK0Qrw+aB8XfOLeJH+HVIlpcUG9m5OCdDzXz8K+ci+5VT6i/TTiA8/e6LIwi45oP43l2W4uSjlGP6+YDvL/9IX1l+lVKBSnHZqmNViBKNhq4N0/svhjex7X907i00V7cHl8ZJfY+HRROot21R+dXFLlorDCQZXDw47cCmxuD3pN47JOcRj1Ot64qlOjm4i7vF5aRAfi8/lYvbf4YHr+4ewvtfHhwrSDBTcmju2OQa/j00V7aok47eODuXtwKmU2F9tzyhnRpQk7c8tZsrsm0sZq1PPZjd2ICRKhIzLAzCMXtGRtRjGf3tCNGRv3syajmDA/E5d3jqdbchjBVvlcAy3iRzNxSTq55Q5uH5DC8jVZvPPHroMeOl2SQnh9VCeSI45PitK+4qpaIk41ZXY3/5u1jddGdyTQYiQp3EBSeD0nOISwABPvX9OFGz9fedBwGyAlwp/3r+lCeD3ij0KhOLtRQs5pzsr0GoO3r5dnMKhV1MFVkMMZ2DISr9fLK7N38tgFLbn7q7XEhVh58Yr2mA16yu0uQv1MzNmaS1SQhQfPa8Eni9IOVqHq0yycOwY046Fv1zP19ppKECaDnrF9m5KWX8nsQ/x3Qv2MTBjbg7
ndmwLv/seUwV5zUAotZk/+VUqo26G9XVaNMJgPx4YGzTpKjQ8gs8F8yFWI1+VzZSc9zHPUHoQ37swPuO5xrx1HdK2UhMTDyOTj9Yelds6aSbJeFr8hIb7+PEy1TICKa+W63BMO5L1ec9JOXGrgJrDVUvvyVPe1dLj1LAul+mdzX5YJl78m2/HRZ+3e3wC8PwP4VsPB/0og3KFwmV3UcAW0GSaaEBnH+nWa9A++LblW1HjZlFRXAAj99mkrMfyHw+7I64trDig9l3Lg/CV3g0HoZi27Phd8e8w3ilPB4iNrzG+f3aEbzcAjd+AWmqSMwLH5DGhG/d4YEpUqOOXuONDtOWSONmkEaenc5P/Bau5wnx9RJN8r7tyyTFUZ/BmFNjuanoJRSpRzleuQYDeCubIhAY2C2Sul2+QtKzfpItq7HRXxY4M97zWNCMAXIwFZKKfXvaSBH1aiE8CBuGdzO7z6DAS7o2TTgFZwLezVj5toDpd8P6hBP2FFk4wB0aRoRcF98mA2r6Sg+XIQnwikTZWRzYWbg2xVkVJ5NEdsWrv1FRjH3uxnO+R9c9jEsfLliv47MXdI3pHxJlsEA570i2RH+bJohpV7BflKjY9rIWHOQ8rGcSvqH5KeByxl4vzp6LQZIero/Qx46cllVXqr0finIlO+d9oolgGXl7Pcf9Kuu8KbgMch7Y/iTEJYo200WPN1GSWnUb4/LNrNNsr8CiHCmMbnrXuIWPEaQzQZXfOEb4Fr3FeSmQJ9rZPztaXdLhpE1VDJzDqySHj5Ne1V88JYDJECasha+HQ+D7ofLP5NG0mc8Jplz1hBw1cDPRCl1XHO4JFjt2yOnkQdy0ndIwPuK6fL78+Sb4ZL35bOF0wFuFxOG+P+8B3D5Sc0rlrArpZSqMRrIUTXuzM4JXNTLN+PEYjLwyuheNIsK5v1xJxJiNfns79c6hoEd4pm14SAAEUFmLumTXPqhqLq6NYskOsR/RsNtQ9uREB50VI+LySR9RkoCIf60PxM4wge4zN3wyyRIWQ1rpwOGiqVQnUZC14tkfPK1s6RxbatToffVcM2vEgjocZlMxSqv5QAJCl32sYwpt4VLVtGA22HsjzKNC8ASJE2SA2l5qox+VjUvqrn0VCqbKWIOgqGPQruhge+Xlwrrv4Np58LrJ8vUq91LAKNMowqkxSmSWfVvhcXDqA8gYxds+BGGPgKXfoRn3M/YBz0MM+6UyW8AhzbK1dsAjE26E/bLnRhWfQJzn4SvroXT7oH4jt4bLX5DTiZyDkhD5FHTYPsfkk0T2xZ+vF0aOJ/9rPQV6nwuXPi2vO9n3iuPkXdY/t+DBHZWfy7lXu+dKRk+Sin1L3jHj5dk5BhwNfaMHKNZMiELs8DjAks4ZO2XzxLL3gGDkQHtYrmyXwufu5mMBp69pDvJ0SH1tHCllDo+GDyexv6Xpm5lZ2cTGRlJVlYWERF6ghtIZr6DQzl2Vu/JJNRm5oRmkSSE2wiymHC63KRkF7I5JYfUXDsdE8PZmZbP/V+vweFyc1r7eB4Z2Zl2CUc/utLj8bDlYC7jP/6bHakSILGYDNxwWhuuPbU1cZWkAx9Rfjoc2gBfXFVxAlVwtDQBzk+TIIy/0dy5h+HTS6V0qURYgjQUjm4NJhsYkBPl3ENycrplJvS5VrIRzMEQEiX9PYoKIW0L/P6ETOgJjoGTb5GSEoNRMhIsIVCQLv133C7pGZK5C6JaQVSyjMF+Z3DFpsomK9z0FyR0OvqfVS06Zo7FnBQJzrjs0kMpLFECbP4U5sD852H+ixX3jf4UmvSE106sGBQ022R6VdmJUP+WI0/ey858sIZjD0kgNc9FjGM/tsUvYdzwHUQ2g2FPwScXV2w2nNQNTrpJRoiXFdEMBj/g3Z54Apz3MrxzunxvMErvhhb9IflEKUFzOSCqNXiKpI9T5m4JlB1cL5OunHYYPAn2/S3lWGU17S3T37Tp8VE7Zo5FpY7S96v2ccfnq/hg3IkEWUz856cNtEsI46XRfrIFa1FNHoue1H8wzJ0C3UdBUb6Uyka3gm2/Q7szIaYtRDYlq6CIwzl2Vu3JIMhionuzKOLDrQRbtQ2nUkrVJv0tq2pFVIiVqBArHRIrZgCYTUaSo0N8rta0jg9j9sRBMtEm2EJEcDX7g5RjMBjomBTOFzedTFquA3uRi5gwK/Fhtn//4SIoEg5tgqt/lCk6W36R8eAdhkvZ1a8PQu5BaXgc3bLi/R05vkEckCDKLw/I//e9Xko+Fr4svUBi2sjkn8jkirXqliBI6g4Xvy9XzowmmThlLJPJ5PGAKUiyJH6ZJOstEdMGrvoGrp0t/Ug2z5QT7uQTJcMhtlxfEVXzwpOqPp0q77BMD/Pnp4lw/Ry45hfJUtm/UrYnniDle4F6LR0tayjEhJZ+awOaeXIhP1cCRue+JBlGkc3h2l/l/b1vOVhD8fQai6HtYPj6hoqPm73Pdzx6p5EQliRXh91OeX/+M8s7oj2+I1w9E/IOwYbvZerb1t/g4DoJgo6aJg2XC9IqBnFAjkWnjslVSh09b7Njb0aO0880p8bEsGuhZP3OegTanyEXkdZ/K+W1RlPphM3IYAuRwRbaJYTV84qVUur4ooEc1SCUfBCoaQnhQUdfRhWI0SSNhPf/LWVPF78v23f+BV9e7R0pfXgTbJ8nH4B8mhsbJHOnfDZPibB46cFjskofnISuRx4bHRQhX+Vl7ZV+Oas/l8yFLhdA1wtgxkS5wpa+XTKLxnwHF74pgQKXUwJJEc2OPDVL1a3UzYHHaOekyPumSXe44kv5tyxIlz4HuxaDNUz+XYOjAvfm+TdcTrlSO31sxX2XfACXfy4BE4MJgz0H3jyleHJagMcC7yS14Gg462kZb16W2QYXvCnB0c0zYessydjpfilE3CVlXvGdJcj64d3+nys4Wu6jlFJHyeFyYwBMxX8zjUZwNfYeOYld5YLAyTdLvzJ7jpSVtx4EBzdAZIsjP4ZSSqlao4EcpY5GdAtpLLvsXfnyx10kmREJXaR0I7I4mBMSA33G+S+PMRihw1mSWXDzQsnoOdrpUFl7pY9K+nbvtj1LJVPo7P/C98WlKwfXSTaDsxBm/5+MMw9NgAG3SeAnPPHonl/VPMsReg6U9EvK3AVTz5ZsrAteB0cWfHKRlPy1OAXOfAziOgYu4ToauSnSkNifH26FWxbLtDOQ0ieTxX8gx2yTr26XwpAHvFlt3S+VpsYLXpL7N+8HJ14nj/PZZVJSVWLvcikzG/mCrKnvNXJs+QuCnTwBQvU9rpQ6eg6nG4vJWFoObjoGMnKw58L2ubDpJ++2A6th9WdSQu5vGqFSSqk6o5chVYOTmmtn3b4spi/bw7zNh9iXkY/zKMeQ16qo5oEzG2LbSjkHSD+dzTPlpBrkPr3GyFWtsoxmOPd/0idl8AMy5vlogzguF6z6zDeIU2LPEsl4iCpT9lWQCe+eKVlFTjtk7YGf74Of74W8tKNbg6p5Ma0DB3OSukFIrJTpfT9B/h2HPgorP5Y+MdnFU6u2z4F3hkjT35qUnxY4y8yRJ+WGJUIT
YPCD/m878D4Jwpz7P99G0EGRkNwXLnoLxnwLA26Fv56XyW1lgzglDqySrKSo5vL9qKkVG4O3HQq9x0gTc6WUOkoSyPFmsBoNBlzuBvi5pTpcdt8gTom8VFjxoW8Jt1JKqTqnGTmqQUnJKuD2z1axdGd66bZwm5lp155E9+TIo55iVSvCmsAlU+HTUb6ZBdZQOO81cOTDiP/C0rdhxTToeiGExsltMnfBsMmQc1AyYIKioPlJcoXL4wbzUQZwShSkwupPA+/fNAPaDJZ1GQzF2Qp+rq5t+B4G3quNYBuKsCS5EvrFlb7ZJUGRcP7r8v46vFnK+qyhEqzbPq/i47hdUqY05hsJ/tSIysvw3Bi9Vw4sQdDrKsnQmfsfSNsqQZshD0uAs7L3myVEAo8fnA09r5D3aCCbZkDn8+Vn0X4Y3Pq3BDIL0qVZcmSy95hUSqmjVJKRU8JkbPwZOZ5/ZgX+rb7pJxjyUF0uRymlVDkayFENRmGRi1fmbPUJ4gDk2J2MeW8Jv941sGGNszSZZcz3LUtgzeeSedO0j2QNzHoQ9q2QZsWnPwLb5njvl3MQZtwlzYf7XAMtTwFHLvz5X9j6u/TFib6q7l5H14t8GyCXt3uxZHuo+me2QtshUna38hPI2C59DJr1lSukA26T6WQA8Z2kuXAgB1ZBYXbNBXJCiydulc28KREUhT0ojuCy20JipF9TywHgKpLjKayKJU7p2yVrrCraDJZeVgYDxLSSL6WUqkF2pxuL2RvIMRoNOBt7j5wjBOc9BuMRbqGUUqo2NaD0BnW8S82x89Xfe/3uy3O42LA/u45XVAWWIIhrB6c/DMP+AzvmwYfnSRAHJNPgm+uh91jvCbPLARk7ZYz5X8/DxxdJg9h/ZkuWxf5VMlY8Y5eMFN+5QHqCOO1VX1dIHPS4IvD+TiNh1wLofbWU3yx+PfBtbRUnj6l6ZA6CA2vg0HrJNNk2R8bZL3tH3ksmi0yNctplfyBGszTuriHpxhjShr9asXzJYCT1zJf4eadHUvIPbZQ1p6yVMrCwBOkfVdUgDkDaNvnvtjnQ+ZzAt+txuQS6tGm3UqoWOVxuLEbv7xmTwYDL08gDOV3OC7yv64V4yv+uV0opVaf0t7BqMBwuN3Zn4JryfZkNeESwqwiWvQc7/vS/b/l7kiFhC5O+N9GtJJjjT+dzYc0X0qOmeLwnlhAZ59zxbHmMIzGaoOflUl5Vvk9OiwFSxtW8n5xEm4OkV8iWn/0/TvN+R34+VXdyDsDvj0nPm/LStkqmyoVvwvsjoGlvCWL4O6HocmHgbBx7rgRkyjZDLswu7oHjkVLA4Cjfuzg9TF4RxgOjfydu08cEp66lILoTaV3G8srKIm7vXwRfjIHdC713SugCoz+V3j/VEdtO/rt3GZxyh4wgL98np2kvaHtGhXUqpVRNczjdPqXfRgO4GnlpFZYQ+Tux4Vvf7aFx0H8CbrdHrwYrpVQ90kCOajBCrCYSI2wczPafedItuRZGJtcURx7sWRx4/95l8M8saYIc2056gXxzfcXbRTWXQM+Pt/tuL8qHb26Am/6EJj2qtqbIZLj6R9g0U6ZMmCxw0g3SGyQkFixlCl3OmgIpq32DAwYDnP9G9TIlVO0rKvAfxCmx72/odwuM/wt2zIcznoDZj/jeJqoFDH2kYsZO5h7Y8its+E7G2fe7GRI6S0+ZXx6Q8eIeD7QeDCOelsyf4qyeYKuJtEI4fdp+hncaRZv40ezKdvHLR4e4bUACTeY/5BvEASlH/OJKGPOdBBWrKqa1vIbM3fDDbXDuy5C6BTb/JP2e+lwLbU+HiCZVf0yllDpK9nLNjk1GQ8Mc0lAd1lBofRq0GVRm/PggGUF+eDOeVoPre4VKKXVc00COajASI4K4/6xOTJy+usK+zk3CaRHTgPrjlGe2QWQLGXvsT0QzOWlN3QLtzoD4DnDtLJj3lLcZbVRLGVP+26OBn2fhq9JDp6pjoyOTJXjT7RI5wQ2UnRDTGq6bDbsWyol8dCsZ9xzRDKwN+Od+PDJZ5QO2I8///uhWYLZIlkp0S8jPgHZDYc10yN4nZXXJJ8p7o6yM4pHlWWXKGzf9JH2cIpvD1t+823fMg3fPkGBR8WSpqBAr95/VmYvfXMiM9ak+D31OOwvGT3/1v96D670lVlUV0VSCP9PHwsF1chwNfkgaGluCIThGG3QrpeqM3ekql5HT+Jsds+UXWPyGDDw443EJ4tuzZQpiTgqWsScC2ixeKaXqiwZyVKmCIhcpWYXM2XSQvRkFDGwfT+cm4SRFBh/5zjXAYDBwescEnr2kO8/8sonUXAcmo4GzT0jigbM7kxBexeBFfbAEyzjk9d/433/SDWAJg7mTYd4U2RYSI9kSw58GjxNC4yW7wd/I8BJpWyUjo6qBHJDMmpCYI98uMlmCN90vrfpjq7oXlgB9r4WFr1TcZw2TkqIS5iAOGaLZkWtlW/i1RCdZ6ZIUTozNhk/no6JCWPA/3yBOib8/gMs+ljT7onzvdkcuLH0XznisdMpap6Rwpl5zEo98t47d6XLbU9rF0izYWfFxyypIr3y/P7FtJZhTmAVZu+HHO2UaXMm+C96EJr0kqKWUUrXI4SzXI8doaPSlVYbCLLjgdZjzpEw5BCmrGnQ/HNpUv4tTSimlgRwlCotc/LH5MLd88jclnz0+WLCTVrEhfHx9vzqbFhUVauXi3smc2j6OPLsTq9lEbKiVUFsjeKvGtoeRz8PP93vHkRuM0sMjNF76g5Sd6JOfDj/cCpd/AR3Pkm1FBZDUo2K/jxJNe1fewFYd+8w26H8rHN4C/5TJcgmKkoyu8Kalm1KyChn/8d+s2pPp8xCPndeVi3s3IzyoOMhRkA6rPw/8nNvmSknett/Lbf8dTpsIZrkqG2IzM6hDPF+N7092YRFmo5HoUCuWgr0SpHT7GXEPR1++FxYP+anwySjvMQfSDHnaOTB+oTQjV0qpWlShR84xEMjxdDkfw8cXQ36ad2NeKsy8Fy55Xz7fKKWUqjeN4OxY1YVDOXZu/XQF5T937EzL59lfNzPlwm6E1FEwxWg00KQGs4AO59hxuT2EB5lrNyAUFCGTopr3kxKq/DQpc9nxp0yi8jeWGaR/SbPekmlhCYZT75R6dE+5+nqTBfrdWJr9oI5j4UlwwRvynjq0Ua6SxrSRkqPinjVOl5tPl+yqEMQBeOyH9QxoG+sN5Hg84C4K/HzuIjD6+dAeEiulXuUkRASREFEma8yUIMfGyo8qPkbrQbL+o+EogPn/8w3ilHDapcn4mU/g8JhIz3NgNEBsqBWTSU9AlFI1p0KPnGOhtOrAGt8gTlkLXsJz6Uc6flwppeqRBnIUAH/vTA/4oeOnNQe4Z1hHsgqLWLcvm3mbD9EsKpizTkgiKTKIEGvDeBt5PB72ZRaweHs6a/dkcnGfZFbtzeSDBTvJyHfQv00sd57RgdZxIVjNNTd22Yc1BGwR4CySniR7l8lEqMJKRqenbvEdLR7TRjIrvp8gE4pAyp4ueFMCQ0fitEvPEZdDymG04euxKTRWvhK7+N2dmut
g2qJdAe/+/ap9NIsK5p9DuVx7YgLJnc7FEKg0sM1g+OnuittPuVMCmH6UPR7X7Mlgwon3EI8B4+pPJfBiMEDHc2DYf6AgS8q7wpLAVMVjMydFjqv9fwe+zZ4lZGVl8NyfB5m5NgWLycjok5pzad/mNI2qm5JRpdSxz14uI8dkNOB0N/Jmx7srGeCQshY3UEufpJRSSlVBwzgDr2F2u51+/fqxevVqVq5cSc+ePUv3rVmzhgkTJrBs2TLi4+O57bbbuO++++pvsQ1ERn7gq/FOt4d8h4sbP1rOrjRvj4xnZ23mpct6cmaXRIIbQDBnc0oOl769iOwCJ5NGdOKF2VuYt+Vw6f6f16Xw+8ZDfHPLAE5oVosTsMKbSPPgfuNh/wrIOwwtTg58+7AEMJX7+dki5ATXZJUTXkcB2MKh7PWv/DRwu6WBsak4syL7gPQ6WTFNyrQik+HMJ2SCT3B0Db9Q1ZB5PB5yCgMf1ylZhWxKyeH3jYf4Ytke/rzuHuK2/S49Z8pqP0zKtgoyfLf3vU4yyQLYdjiXuau3Em52k5PvZPCbe7m67zjuuOkOglx5GCzB0tPmg7MkYBkcLYGhnldU3vjYaZem4t+Nh44jpBFz6j/+fwbRrXnlz318tNg75et/v/3Dj6sP8PH1J9Vo5p9S6vjlcLoIK5PxazQcA6VVcR0CZ9xENqfIY9FAjlJK1aNq55d/8MEHfPnllxW2f/nll0ybNq1GFvVv3XfffTRt2rTC9uzsbIYNG0bLli35+++/efbZZ3nsscd4++2362GVDcuJrQKf5LeND2VXWp5PEAekGuOu6as5lON/XHhdOpxjZ8KnK8kucBJiNdEyNsQniFPC4XLz2I/rycx31N5izFZpONv8JLimeAJUs77S28SfUydCaJkeIek74IPh8PV1MH0MfHEVfHsDvHcGZOyUTIQ10+Hji2XK0NynZOJQXip8cyMseVOCOCDNa7+6ViZReRr3h0pVPaE2MwPaBi5ZOrF1DBv2S6ZYvsPFVd8cJvfq3yUAGd0KkrpJFth5r0rA5pYlMOxJCQzevEjGlwcoiSrIPEj0njncsPt+Rq+7kaeCP2PW1cks25vPoHd3kh3aClZ8CJ9c4s06K8iA3/4P/nwe7AEmcoG81z86X0aPr/sael4Z8KbZvW7isxUVSxq3Hc5lyfajaLCslFJ+SGlV2YwcaOzTxw0dR3gvEpXjOeUOCoN0MqBSStWnagdypkyZQlxcxQ/vCQkJPPXUUzWyqH/j559/ZtasWTz33HMV9n3yySc4HA7ef/99unbtyujRo7n99tt54YUX6mGlDUvTqGBOa+f/pOyRc7rw/Cz/zXddbg9Ld9T/CVF6noNth3MBmZyzYldmwNsu35lBTqG3p0au3cmutDw2HshmT3o+dqefhqwej/Qjyd4P9pwjL8hshagW0PxEORlO7ApXfg1B5TKBeo2FEy729h9xFMCCl/w3hXUVwdK34O+P4JsbYP9KKcua/4JkNWTsgp1/ym2N5TJ8Zj/qPWFWDUdRgbynclLAFaAR8FGKCLbwwNmdMBsrXlNtnxCGxWTkQFZh6bZNB/O44uuD5Ax8FK6bBWO+h56XQ3iiZMskdJLJbKfcIeVcgTK88jOwzX+G2B/Hwp4lkLaV4NUf0PyLYbwxPIRJIzphs6fCkjf833/5O5B3yP8+p0MCla7iTKO8VEhZI1NUyr7nTVbc577Ch1vM5Dn8/1y/XrGXfMcRpmkppVQVOMoFciQjp5FHcrL2wvmvF2cDI/3XDAboeSWG4BiCCyteLFNKKVV3ql0Ps3v3blq3bl1he8uWLdm9e3eNLOpoHTx4kBtuuIHvvvuOkJCKU5YWLVrEwIEDsVq9zTmHDx/OM888Q0ZGBtHRFU9M7HY7drs34yQ7u5JeJ41YbJiN5y7twWdLd/P+gh1kFzg5oVkEj4zsQlJkEJsP5ga8b2YlZVl1xVHmJLjI5cFmCRyjNBsNlJzbHsgsYPKMDfyyPgW3B2xmI+MGtOL609oQH16cQZNzEDb+CItelXKmVqfC6Q9DTDsoSPM2AwyJlbIqQ7kTZ1uofIXEwvgFUk5SmA1x7WWaVXCU97ZFuXBgZeAXum8FtB7o50UFSRnX6Y9A0gkSbLKFQ8o6CQzlHqxaAKoBO6aORbcbMnbA/Bdh0wz59+szDnqNgchmNfY07RPC+G7CKfznpw0s3p5OsMXEhb2acUaXRO76YlWF22fmF1HoNhMefpRTpAByUzAuf6/i9qJ84v98BEfbKaQeyiHZX4NikCBmfqoEQnNS5P8NZgiJkWbg+1fI7bqNkiBoUb6Uft04T7J0TFaI60COKZrv5q0IuMwQqwmzvwbO6oiOqWNRqRogPXLqfvx4rR6La7+Uzy3X/QaFGdLHLLyJZAZ/cRWWCUtr7rmUUkpVW7UDOQkJCaxZs4ZWrVr5bF+9ejWxsfWXZunxeBg3bhzjx4+nb9++7Ny5s8JtUlJSKgShEhMTS/f5C+RMmTKFxx9/vFbW3NAkRgRx65B2XHZic2m9YjUSE2ojNddO+4Qw/jnkP5jTr01MHa+0ougQKyFWE/kOFxsOZHPXmR0C3nZk9yZEh1hJz3VwxxerfDKK7E43b/25HYC7zuxAkCMDfrwTtsz0PsDm4v/vew38cJucbIKMUL7gDWg5QE44yzOZIaq5fAViDobo1oHHj0e39D/9yuORPjwz74E5k73bW5wMl7wHX9/gd7pQY3JMHYuZO+Gd06Ew07tt3hTY8D1c9bVMn6oBVrOJE5pF8uZVfchzuDAC6w9kc+OHy/02Nz+jcwKRwf+y39W2uQF3GXYvoNtJ4DYFKDMsYYuETT/BjDu9gdKoFnDh29B+OHQ6R3r5TB/jzc4JjYfzX5P3vC2cCI+HMSe35LEfN/h9irH9W2E1ayDnaBxTx6JSNcDhqpiRUxdTq2r1WGx3JoQnwIfnygAFkFKrAXfAFV9gQMu1lVKqPlX7U+zll1/O7bffzty5c3G5XLhcLubMmcMdd9zB6NGja3yBkyZNwmAwVPq1adMmXnnlFXJycnjggQdq9PkfeOABsrKySr/27NlTo4/f0JhNRppEBtMsOpiYUDnZiguzMfmCEyokmgAMbB9HswYw/SUh3MY9wzsCUu41a30Kt57ersLtmkQGcfewjoTYzBzKLQxYFjZ14U5Sc+yQtcc3iAOSPdH3WvhstDeIAxJg+XSUXK06WrYwOG1i4P29r5bsoPJ6XAa/TKo4ZWL3YsnIOftZOdFtxI6ZY7GoEOa/5BvEKXFog0w6q2FRIVaaRQXTJCqYzknhxIVVDKREBJm5ekCrfz/RzVjJ/Q0GPBhYlW6B+E7+b9Osj/xspo/xHX2buVt643S7VEoUF77sDeKANBWfPlbKrQCDwcBZJzShV4uoCk9xQc+mdEwKr/5rU8AxdCwqVUMcRa5yPXLqJiOnVo/FJt3gk1HeIA7I79y/noPcQ7iDtUeOUkrVp2pfep08eTI7d+5k6NChmM1yd7fbzdixY2ulR87dd9/NuHHjKr1NmzZtmDNnDosWLcJm8z1B6du3L1
deeSXTpk0jKSmJgwd9sxlKvk9KSvL72DabrcJjHo+6NYvkq/EDeHLmBlbsyiQ6xMJ1p7bm0r7NifVzUljXrGYTF/VqRpOIIJ79dTOfL9vD7UPb8eVNJ/Ptyn0cznUwvEsi/dt5A097MwoCPp7d6abI5YFtcyru7HwOrPnCfx8btwsWvAznvAiWoKN7MXGdYOSL8Osk71hycxCc8z8Jxjj8ZEY17QXznvb/eDvnw4hnJUjUiB0zx2JBOmz+KfD+1Z9BhxHSZ6kWNIsO4cvx/Xl97la+WbkPl9vD8K5J3D2sA82jK5akVlubIQF3OVudztzdRXyyKpPel7xF8g+X+maYhcbDRe/CzHsDPIBdStKWBmhQ7yyUjLn+EwBIigzizav6sG5fFtOX7yXIYuSKfi1oGx/mN5ilquaYORaVqiEOlwdLmdKquppaVavH4uaZ8jvVn/nPY2hxMoRE1c5zK6WUOqJqB3KsVitffPEFkydPZvXq1QQHB9OtWzdatmxZG+sjPj6e+PgjZxK8/PLL/Oc//yn9fv/+/QwfPpwvvviCfv36AdC/f38eeughioqKsFikE//s2bPp2LGj37Iq5RVqM9OnZTTvXX0iBQ4XRgPEhwdh8tNItb5EhVgZ0a0JfVtFU+TyYDYZSAgPok/LGFwej8/VMoD4Sk7kjAbpl4PVT/AjqhX8MyvwQlJWgyPv6AM5wZHSZLbdUMnuMRilrCQsUYI43S6FtdN97+M6QtPWovzK96u6YzCBpZKAiTWs8qyWGtA8JoT/O68rtw9tjweIDLYQavuXJVUlwhJh4P3w5zO+24OiSOn/KO99fojM/CKu+C6D58/6muaOnYTl/ENIs64YE7tK0+LD/suhAPAU9xcKZL9vj6nEiCASI4IY1DEeIwaMDeh3llLq2OBwun16bsnUqkZeenRoY+B96dsxeBp5M2ellGrkjvqTe4cOHejQIXAfkrrWokULn+/DwuQEvG3btiQnJwNwxRVX8Pjjj3Pddddx//33s27dOl566SVefPHFOl9vYxUdYqUmLtrXpvhw3wCK0WjASMWTt6TIIJrHBLMnvWJmzpldEokItkDb0ys+QW6KjGdOWeN/AbHt/PfIqQ5LsPTDiS4XILUEwfAnpZRq8RvSwPiEiyG2beWPFxT179ajak5ovJTm/fZ//vefeH2tB3IAgiwmmtRGWWRwJJw8HtoOkQbheYeg3ZkUdL6Eaz/ZU9ocfXd6PqM+3U18eDCfXn8D7ROLS50KMqVPVPZ+/4/vdkJcRzi4zv/+5v38btbGxkqp2uDxeIp75JTJyDHWTY+cWtWkh2Qf+xPXEUOA0eRKKaXqRpUCORMnTmTy5MmEhoYycWIl/TugQY/yjoyMZNasWUyYMIE+ffoQFxfHo48+yo033ljfS1P1IDEiiKnjTmLc1KU+wZwTW0Xzf+d2JSzIDIYkGPEM/Hy/944bvoeL34WNP/h/4FPvAmstRrvCEqDdGdCiv5Ry2cKl8WuHs2DLLxVv32EEhPofLa/qgdEI3S+FDd9VyB6hzzgJBDZ2ITHQsj807QkuB1jDKLK7uKKfkSd/2iBli4DVZOSeYR1IiiwTfA2OgsGTYNq5FR/XYIT4jjD0Ufj00or7beHQflitvCSllPLH4ZLMFJ8eOcVNBd1uT+PNAmx/pjTh9zPx0jPoPgxRLfzcSSmlVF2pUiBn5cqVFBUVlf5/Y9CqVSs8nopXQ7p3785ff/1VDytSDVHbhDC+Gj+AA1kFHM6xkxwdQkK4zdv3xxYOPS6HlqfAig8h54BMzEk8AS56B368w1u2ZAmR3jgxdXQibg31/n9wlDz3TLP0X/F4ZAx6x5HS6LjsiHNV/yKawujPJKtr1Sfyb9nnGohpC6HHUANJS3BpdlpEsIlL+yZzeqcEdhzOxWAw0DoulPhwG0GWchlISd3hrGfgt0e9faJsEXDR2/KzC46BkS/A7Ee9PaNi2sAlH0Bkch2+QKXU8c7hLAnk+I4fB3C6PVgbaSDHENkKz5hvMXxzI6TLNE+sYXgGT8IQIPNRKaVU3TF4/EU7VEDZ2dlERkaSlZVFREREfS9H1TW3y1v24rRLo9bs/RI4iWwm/UHM9dgEtDBLpvcUZkNQhJTxBEXW33pq0TFzLLrdEnTzNxbueFZUINNSsvdJ35zwJhCWBObidH5XkUyNy08FkxVCYiHcf9N6VbuOmWNRqaOQmmun739+4+4zO9C3VQwAC7am8urcrWx84iyCrbVfKluixo9Ftwsy90iTfqddPlOEHbufK5RSqjGpdo+ca6+9lpdeeonwcN/RrXl5edx22228//77NbY4pRqcsr1LzDZpQtyQ0ouDIvUDVmOjvVv8C9QnqoTJAlHN5UsppeqJvTgjx2r2HT8O4HS7gboL5NQ4owliWgGt6nkhSimlyqv2GcS0adMoKKjYHLagoIAPP/ywRhallFJKKaVUQ2cvcgH+e+Q0+slVSimlGqwqZ+RkZ2fj8XjweDzk5OQQFORtTulyuZg5cyYJCQm1skillFJKKaUaGruzYrNjY5keOUoppVRtqHIgJyoqCoPBgMFg8Dt23GAw8Pjjj9fo4pSqSdmFRaTm2Fm9NxOTwUj35Ejiw2yEBlW7wlApVU0HsgrYnZbPjrQ82sSF0iIm1HdalVJKNUJ2P82OS/obN/aMHJfbQ0pWAf8cyuVQtp3OTcJpEhVMXFg99gJUSikFVCOQM3fuXDweD6effjpff/01MTExpfusVistW7akadOmtbJIpf6tjDwH783fwWvztlLS3ttogAfP7sylfZOJCLbWz8IKMsCeK2OVwxKk74c6vriKpEG12wW2MAiOru8V1bgdqblc9e5S9mV6y3KbxwTz0bX9aBUXWsk9lVKqYSsprbKa/PXIabyBHJfbw5q9mXy+eBsXtLfSLsjAn2sP8XeKi6cu6kZSZHB9L1EppY5rVQ7kDBo0CIAdO3bQokULDDphRTUi6/Zn8ercrT7b3B74z08b6dMyml4t6jiQU1QIhzfBrIdg53wZc97nWuh3k0y/UseH7P2w5C1Y/h7Yc2TM/fAnIb4zWI6NbJXUHDs3fvi3TxAHYE96ARM+XcGH155ErF7dVUo1UqUZOWWaHRtLeuS4Gm8g51B2IaGFKTwaNJ3Qnz+Gonx6tBzIoQGPMGPFTq48pT3BVs1oVkqp+lLtZscbN25kwYIFpd+/9tpr9OzZkyuuuIKMjIwaXZxSNSG7oIjXygVxynpv/g4Ki6+o1ZnDm+DdoRLEATmJX/gSfDZaRiqrY19OCnx2OSz4n/z7A+xaIO+LQxvrdWk1KS3Pzj+Hcv3uW78/m7Q8Rx2vSCmlao6/HjklGTkuT+MN5NgKDtJu1jWErngLHHng8WDa+QdNpp/NyGZ5pOvvbqWUqlfVDuTce++9ZGdnA7B27VomTpzI2WefzY4dO5g4cWKNL1Cpf8vhdHMo2x5w//7MgtIPYnWiIEMycdzOivtS1sChTXW3FlV/Dm+GA6sqbne75P2Rf2wExvMclQdJC46wXymlGjK7s3hqlTMXcvYDZQI57jr8b
FHDgtLWY0z1c1HB5SBuyX8J9uTX/aKUUkqVqnYgZ8eOHXTp0gWAr7/+mnPPPZennnqK1157jZ9//rnGF6jUvxUWZKZvq8B9R/q3jSXUaqq7BTnyvJk4/mzW4+i4sOWXwPt2LQCH/yyWxiYm1Fra+LM8k9FAVIj2hVJKNV72IgnWWNd+Dj8/AHhKS6uKGnFpVdCWHwPus+ycSygFAfcrpZSqfdUO5FitVvLzJQr/22+/MWzYMABiYmJKM3WUakiCLCZuHNjWpxFhiRCriUv7NsfsZ1+tMRilJ04gobF1txZVf0LiAu+zhsr75BgQG2rlkt7JfvddcWJznX6ilGrU7E43BsCUugnyUyF9W5mMnMYbyCEsPvC+oAis5jq8AKaUUqqCap8pnHrqqUycOJHJkyezdOlSRo4cCcCWLVtITvb/YV2p+tYiNpgvbjqZTkneAEr35Ei+Gt+f5OiQul1MaLw0Ng6ky3l1txZVfzqfE3hfn2vkfXIMCA+ycM9ZHRk/qA3BFvngH2I1ceuQttx+RgdCbdosUynVeNmdLqxmI4asXbJh7/JjYvy4scfogPvcJ96IITSxDlejlFKqvGp/gn711Ve55ZZb+Oqrr3jjjTdo1kwm7Pz888+cddZZNb5ApWqC1WSiV4toPr6+H9kFRRgMEBlsJSa0HsaOmyxw8njYPld64pR19nMQrlOrjgsRzWDki/DTXb7bk7pD/1vAXA/vzVqSEB7ExDM7cNXJLSkschFsMREfbtMrukqpRs/udGMxesBVBEERsG8FpuZyQaYxjx8nIhnOnAyzH/HZ7Gl2IsbeY8Gkv7+VUqo+VTuQ06JFC2bMmFFh+4svvlgjC1KqNsWF2RpGKUdEU7jyS2l4u2mmlFN1OR/Cm0JQJWVX6thhC4Puo6DVKbDhB8g7BB3PhoTOEJ5U36urcVazqe6z35RSqpbZi9xYDS7wGCChK6Rv844fb8yBnOBI6HM1tB8G67+FgkzofA6GuPbH5N8opZRqbKoUyMnOziYiIqL0/ytTcjul1BGEJ8lXm0H1vRJVX2zhEN8RBt1b3ytRSil1FOxOF1ZPkVyQCY4Ge86x0SMHIChSvhIeqO+VKKWUKqdKgZzo6GgOHDhAQkICUVFRGAwVR5B4PB4MBgMul46SVUoppZRSxz67040FJ9gipFF9UQFGjxM4BgI5SimlGqwqBXLmzJlDTEwMAHPnzq3VBSnVINhzpdQlPx0sIdJ4trIJDkodj+x5xcdJWvFxEgdhCfW9KqWUqjN2pwsLDrCGye9BwOTMA8Dpdtfn0mpG9gHIPyw9gELiICwRLEH1vSqllDruVSmQM2iQt/SjdevWNG/evEJWjsfjYc+ePTW7OqXqQ+4hmPc0rJgK7uIMs8SucOmHENuuXpemVIORewj++C/8/b73OEnoIsdJXPv6XZtSStURe5Ebi9shpbJWCeQYHTlAI8/IcTnhwCr48mrI2ivbzEFw+qPQ83IIianX5Sml1PGu2uPHW7duzeHDhytsT09Pp3Xr1jWyKKXqjasIlrwFy9/znpwCHFwPH14A2fvrbWlKNRiuIlj+Pix7x/c4ObQBPjwfsvfV39qUUqoOFTrdWDyF0sC+JCOnqCQjpxEHcrL3wrRzvUEcAGchzHoQdi+qv3UppZQCjiKQU9ILp7zc3FyCgjTVUjVyOSmw5E3/+7L2QPr2ul2PUg1RTgoses3/vux9kLq1btejlFL1pNBRhMVtlx45llAAjPZjICNnyy9QlO9/35zJkFvxoq5SSqm6U+Xx4xMnTgTAYDDwyCOPEBLiHSPrcrlYsmQJPXv2rPEFKlWnigrAkRt4f+pWaHVq3a1HqYbIWQD2SiYYpm7RaWxKqeNCQUEBVpxgjQCLDQxGTEXZQEzjzsjZuzzwvrSt4HLU3VqUUkpVUOVAzsqVKwHJyFm7di1Wq7V0n9VqpUePHtxzzz01v0Kl6pIlWBoWBgrmxLat2/Uo1RBZgqUfRPFV5wriOtTtepRSqp4U2u0EUyS/Ew1GsAQX98iJwd2YAznN+sDaL/3vi20HJqv/fUoppepElQM5JdOqrrnmGl566SUiIiJqbVFK1ZvwJOg3Hv56ruK+yGQN5CgFEJYE/W+FeVMq7otops2OlVLHjQJ7EZEGp/TIAbCGYSrOWGzUGTkdRsDvT/gvrxrysE7yVEqpelbtHjkffPCBBnHUsctkgZNuhD7XyJW1EgmdYez3ENG0/tamVENhskDfa6Hv9WA0ebfHd4Kx3+lxopQ6bhQWObHhLO2PgyUYY3Egx9WYx49HJsPVP/r+PjcHwZmToeWA+luXUkopoBoZOWUtX76c6dOns3v3bhwO3xrZb775pkYWplS9CU+EYf+BU26H/HQZJxoSB2EJ9b0ypRqOsAQ483EYcCvkp+lxopQ6LhUWubGaDGAsvvhjCcHgyMFoaOQZOSazlFdd/zvkpcq0wtA4CEsEiw43UUqp+lbtjJzPP/+cAQMGsHHjRr799luKiopYv349c+bMITIysjbWWC0//fQT/fr1Izg4mOjoaC644AKf/bt372bkyJGEhISQkJDAvffei9PprJ/FqobLFgYxbSC5LyR00ZNTpfyxhUFMaz1OlFLHrQIXWMxlMhMtIVCYhcloaNxTqwAMBsnIadIdkvtAdEsN4iilVANR7Yycp556ihdffJEJEyYQHh7OSy+9ROvWrbnpppto0qRJbayxyr7++mtuuOEGnnrqKU4//XScTifr1q0r3e9yuRg5ciRJSUksXLiQAwcOMHbsWCwWC0899VQ9rlwppZRSSjU2dpcBW5DBu8Fsg9wCjAYDTlcjD+QopZRqsKqdkbNt2zZGjhwJyLSqvLw8DAYDd911F2+//XaNL7CqnE4nd9xxB88++yzjx4+nQ4cOdOnShUsvvbT0NrNmzWLDhg18/PHH9OzZkxEjRjB58mRee+21CiViSimllFJKVcbuNmEtm5FjtoKzAJPRgNujgRyllFK1o9qBnOjoaHJyZORss2bNSjNeMjMzyc/309m+jqxYsYJ9+/ZhNBrp1asXTZo0YcSIET4ZOYsWLaJbt24kJiaWbhs+fDjZ2dmsX7/e7+Pa7Xays7N9vpRSdU+PRaUaBj0WlRJFLjdOjL6BHJMNiiSQU9s9cvRYVEqp41e1AzkDBw5k9uzZAIwaNYo77riDG264gcsvv5yhQ4fW+AKravv27QA89thjPPzww8yYMYPo6GgGDx5Meno6ACkpKT5BHKD0+5SUFL+PO2XKFCIjI0u/mjdvXouvQikViB6LSjUMeiwqJQqLXABYrRbvRrMNnHZMhtrvkaPHolJKHb+qHch59dVXGT16NAAPPfQQEydO5ODBg1x88cW89957Nb7ASZMmYTAYKv3atGkT7uIRjw899BAXX3wxffr04YMPPsBgMPDll18e9fM/8MADZGVllX7t2bOnpl6aUqoa9FhUqmHQY1EpUegoDuRYygRyTFZwOzEaqfUeOXosKqXU8avazY5jYmJK/99oNDJp0qQaXVB5d999N+PGjav0Nm3atOHAgQMAdOnSpXS7
zWajTZs27N69G4CkpCSWLl3qc9+DBw+W7vPHZrNhs9mOdvlKqRqix6JSDYMei0qJwnxpNWCzWL0bTXJsmAzgKr7IWFv0WFRKqeNXtQM5JpOJAwcOkJDgO2Y2LS2NhIQEXC5XjS0OID4+nvj4+CPerk+fPthsNjZv3sypp54KQFFRETt37qRly5YA9O/fnyeffJJDhw6Vrn/27NlERET4BICUUkoppZSqTGGOlO5bywZTTBLUMeGp9R45Simljl/VDuR4AnTgt9vtWK1Wv/vqQkREBOPHj+f//u//aN68OS1btuTZZ58FpJcPwLBhw+jSpQtjxozhv//9LykpKTz88MNMmDBBr2gopZRSSqkqK8jJAMBiDfJuNMtnYaPBU+s9cpRSSh2/qhzIefnllwEwGAy8++67hIWFle5zuVz8+eefdOrUqeZXWA3PPvssZrOZMWPGUFBQQL9+/ZgzZw7R0dGAZBPNmDGDm2++mf79+xMaGsrVV1/NE088Ua/rVkoppZRSjUthnkyJ8peRY0QDOUoppWpPlQM5L774IiAZOW+++SYmk3fUotVqpVWrVrz55ps1v8JqsFgsPPfcczz33HMBb9OyZUtmzpxZh6tSSimllFLHmvzcbCCYoKBg70ZzSY8cLa1SSilVe6ocyNmxYwcAQ4YM4ZtvvinNclFKKaWUUup4U5CfCwRjK9taoDiQoxk5SimlalO1e+TMnTvX53un00lhYaFPqZVSSimllFLHsvz8XCCeoLKfpkubHbs1I0cppVStMVb1hj/++CNTp0712fbkk08SFhZGVFQUw4YNIyMjo6bXp5RSSimlVIOTX1CACRdmo8G70WgGgxEjrlofP66UUur4VeVAzgsvvEBeXl7p9wsXLuTRRx/lkUceYfr06ezZs4fJkyfXyiKVUkoppZRqSAoK7QQZXL4bDQYwWzFqRo5SSqlaVOVAzvr16xkwYEDp91999RVnnnkmDz30EBdddBHPP/88P/74Y60sUimllFJKqYYkr7AIm9FVcYfJhsnj1h45Simlak2VAzk5OTnExsaWfj9//nyGDh1a+n3Xrl3Zv39/za5OKaWUUkqpBqjAUUSQ0U/5lNlWXFqlgRyllFK1o8rNjps1a8bGjRtp0aIFubm5rF69unQkOUBaWhohISG1skilAsmzO0nNtbM/swCbxURSRBCJEUGYytarK6UahfQ8O6m5Dg7n2IkJtRIfZiMu3Fbfy1JKKb/yi9zYTH6CNSYrxqLGH8hxuT0czC4kJbsQe5GLplHBxIfZCLFVe1aKUkqpGlbl38SjRo3izjvv5MEHH2TmzJkkJSVx8sknl+5fvnw5HTt2rJVFKuVPWp6d9//awZt/bi/9sBQdYuHNq/rQu2U0FlOVE87qVu5BcLsgKAqsGvxUCmB/ZgETp69i8fZ0AAa0iebG3uGc1DqakMj40pG+SinVILjd5DvBFuTnwpHJisnjatQ9chwuFyt2ZfLmvK2c385CiMXAD38XER8dxbgBrYgJtR75QZRSStWaKgdyHn30Ufbt28ftt99OUlISH3/8MSaTqXT/Z599xrnnnlsri1TKn7+2pPLavG0+2zLyixj7/lJm3TWQlrGh9bSyAHJSYNNPsPh1KMyCtkNh4N0Q3QZMenVLHb+yC4p45Lt1LN6eTkSQmY9GNafV/hlEzv8Y5hXi7nwexv63QnRLaSSqlFL1rTCTAo8Vm9l/IMeIE5er8U6tSsksxJx3kBdbLSF61QfgyGVg62Ecbj2eNXvSGdwpqb6XqJRSx7Uqnz0GBwfz4YcfBtw/d+7cGlmQUlVxOKeQ//22xe8+u9PN7A0Huf60NnW8qkrkHoLvboFtv3u3rfkcNn4PN8yBhC71tzal6llanp05mw8B8Ob5Ten2x40YD60t3W9c9g6s/xqunwsxrepplUopVUZ+OnmeIGxmP9m/Jismt7NRZ+S4cw7Sc/FdmPctLt0WvPZjWmz5HsulP5OaE62lr0opVY/+Ve3J008/TWZmZg0tRamqc7o87MkoCLh/44HsOlxNFaTv8A3ilCgqgNn/B4UNbL1K1aE8uwuPB1rGhtCuaJNPEKdUfjosfg2c9rpfoFJKlZefSj5B2MymivvMNoyexh3Iicrd6hPEKWXPIWb5i1jcgT+DKaWUqn3/KpDz1FNPkZ6eXlNrUarKrGYjHRLDAu4/sVVMHa6mCjb+GHjf1tlg10COOn6FB5kxGQ2c3DKChG1fB77hhu8loKOUUvUtP418bNgsfpLbjRaMHmejbnYc8c+3AffZts4kxJVbh6tRSilV3r8K5Hg8jfcPlGrcYsNs3H9WJ7/7IoLMnNo+ro5XdASVNTU2WQHt+6GOX3FhNi7u3YwilweXMSjwDc1B2iNHKdUwFAdygqx+AjkmCyZPUaMO5GCtpM+gOQiLv5IypZRSdabKv4WfeOIJ8vPza3MtSlVLrxbRPH1xNyKCvB+i2iWE8cVN/WkWFVyPK/Ojy/mB93W/HEJi624tSjUwoTYz9wzrSFRoEIc6XRX4hn2vhdCEuluYUkoFkp9GHiEEWfz1yLFgauSlVYaeVwTc5+41FkLj63A1Simlyqtys+PHH3+c8ePHExLizSzYsGEDTZs2rZWFKXUkkcEWLumdzMD28WTkObCYjMSEWhtm872IZnDaPfDXc77bo1rAwIlgqSQLQanjQEJEEHcP74QnLxZXt9GY1n7ue4PEbtD9UjDqVWClVAOQl0qupyXBflrkYLJi9BRR1IinVhmiW8FJN8LSt313xLbF2O9GMFnqZV1KKaVElQM5/sqomjdvXqOLUaq6zCYjTaOCadrQMnDKC46CAbdCx7Nh+buQlwpdL4TWAyEyub5Xp1SDEGozg60JDP8P9BkLy94DZz70vBKa9YEIvXCglGoYXDmHKMBGsMX/+HGT2964S6tCYmDwJOh2CSx9D+yZ0O1SaNEfIpvV9+qUUuq4V+VADoBBexModfSCoyG5DzTtCW4nmBtg5pBSDUFYvHw17wceN5it9b0ipZTykZeTCUCQv0/SJism8nC6XHW6phoXEitfzfqA26WfW5RSqgGpViCnQ4cORwzm6BQrpY7AaJIvpVTlTNX6E6WUUnUmL0emTQab/WXkWDDhxtmIS6t8GM3ypZRSqsGo1m/lxx9/nMjIyNpai1JKKaWUUg1ebl4eAMEBM3LcjT8jRymlVINVrUDO6NGjSUjQiSFKKaWUUuo45XaTW2gHIKjSjJxG3CNHKaVUg1bl8R/aH0cppZRSSh33CjPJc0vvrhC/GTnFgRz3MVJapZRSqsGpciDH39QqpZRSSimljiu5h8hFpmX6z8ixYsKlGTlKKaVqTZVLq9x6VUEppZRSSh3v8g6TSxBQeY+cRj1+XCmlVINW5YwcpZRSSimljnt5h8j1BGMxgtkYuEdOkQZylFJK1RIN5CillFJKKVVVeankGsIJsQTYb7JIaZUmsyullKolx1QgZ8uWLZx//vnExcURERHBqaeeyty5c31us3v3bkaOHElISAgJCQnce++9OJ3OelqxUkoppZRqVHIPkm2
OJtRffxwAowWTwY22yFFKKVVbjqlAzjnnnIPT6WTOnDn8/fff9OjRg3POOYeUlBQAXC4XI0eOxOFwsHDhQqZNm8bUqVN59NFH63nlSimllFKqUchJIccYRYglQCDHYMBkMOLBoH1ylFJK1YpjJpCTmprKP//8w6RJk+jevTvt27fn6aefJj8/n3Xr1gEwa9YsNmzYwMcff0zPnj0ZMWIEkydP5rXXXsPhcNTzK1BKKaWUUg1e9gGyDeH+R48XM5kkyKMjyJVSStWGYyaQExsbS8eOHfnwww/Jy8vD6XTy1ltvkZCQQJ8+fQBYtGgR3bp1IzExsfR+w4cPJzs7m/Xr1/t9XLvdTnZ2ts+XUqru6bGoVMOgx6I67uXsJ4swggNl5AAmo3zErs0R5HosKqXU8euYCeQYDAZ+++03Vq5cSXh4OEFBQbzwwgv88ssvREdHA5CSkuITxAFKvy8pvypvypQpREZGln41b968dl+IUsovPRaVahj0WFTHvdyDZHlCCA3U7Ji6CeTosaiUUsevBh/ImTRpEgaDodKvTZs24fF4mDBhAgkJCfz1118sXbqUCy64gHPPPZcDBw4c9fM/8MADZGVllX7t2bOnBl+dUqqq9FhUqmHQY1Ed15x2KMgg220L3COHMoGcWiyt0mNRKaWOX5VU9zYMd999N+PGjav0Nm3atGHOnDnMmDGDjIwMIiIiAHj99deZPXs206ZNY9KkSSQlJbF06VKf+x48eBCApKQkv49ts9mw2Wz//oUopf4VPRaVahj0WFTHtRzJ4M5xmQkJNLWKsj1yai8jR49FpZQ6fjX4QE58fDzx8fFHvF1+fj4ARqNvkpHRaMRdfDWkf//+PPnkkxw6dIiEhAQAZs+eTUREBF26dKnhlSullFJKqWNKTgoeD+Q4TUcorTIBtRvIUUopdfxq8KVVVdW/f3+io6O5+uqrWb16NVu2bOHee+9lx44djBw5EoBhw4bRpUsXxowZw+rVq/n11195+OGHmTBhgl7RUEoppZRSlcs5gB0LRW5D5aVVppIeOTq1SimlVM07ZgI5cXFx/PLLL+Tm5nL66afTt29f5s+fz/fff0+PHj0AMJlMzJgxA5PJRP/+/bnqqqsYO3YsTzzxRD2vXimllFJKNXg5KWQZZYhG5YEcycgpqsVmx0oppY5fDb60qjr69u3Lr7/+WultWrZsycyZM+toRUoppZRS6piRvZfM4GQogHBr4JuZiwM5Li2tUkopVQuOmYwcpZRSSimlalXWXjKtTQAIqyQjx2iSa6VFWlqllFKqFmggRymllFJKqarI3E2mJRGA0CqUVmmzY6WUUrVBAzlKKaWUUkpVRdZessxxAJVPrSrOyHG5NSNHKaVUzdNAjlJKKaWUUkfitEPuQTJN0YRawGSsJCPHLFEebXaslFKqNmggRymllFJKqSPJ3gdAJhGV9scBMJdk5BQ5an1ZSimljj8ayFFKKaWUUupIsvYCkOkJPWIgx2gubnbsKKj1ZSmllDr+aCBHKaWUUkqpIykJ5LiCKu2PA2AyyQ2cdg3kKKWUqnnm+l6AUkoppZRSDV7GLgiOJt1uIMxW+U1NxRk5Toe9DhamlFLqeKMZOUoppZRSSh1J+nYIb0J6oYdwa+WlVSazFQCno7AuVqaUUuo4o4EcpZRSSimljqQ4kJNRhUCOuTiQ4yrSjByllFI1TwM5SimllFJKHUn6djxhSWTYPYRbK7+psWT8uAZylFJK1QIN5CillFJKKVWZwmwoSCc3JBmnG8KPMLXKZCludqw9cpRSStUCDeQopZRSSilVmYwd8h9rE4AjllYZzDbMOClyOGp9aUoppY4/GshRSimllFKqMunbAUgzJwBHDuRgNGHGhcNZVNsrU0opdRzSQI5SSimllFKVSdsKtggyXMEAR+yRA2DBhcOhgRyllFI1TwM5SimllFJKVebwZohqTmqBB4AI2xEycgCLwU2RZuQopZSqBRrIUUoppZRSqjKHN0FEMqkFHsIsYDYeOZBjNrhxFDnrYHFKKaWONxrIUUoppZRSKhC3C1K3QFQL0go8RFYhGwdKAjmuWl6cUkqp45EGcpRSSimllAokczc47RCZTGqBm4gjNTouZjZ4cLg0kKOUUqrmaSBHKaWUUkqpQFK3yH+jWpBa4KlSfxyQHjkOp7sWF6aUUup4pYEcpZRSSimlAjm4HiwhEBJXrUCO2eChyKWBHKWUUjVPAzlKKaWUUkoFkrIWYtqAwcDhfE/VS6uMBs3IUUopVSs0kKPUscBVJM0YlWps3E5w6VQXpVQDlrIGolvjdHvIKPQQHVTVQA44XJ5aXlwdcLvlc4ZSSqkGw1zfC1BK/QvZ+2DPUljzBdgi4cTrILYthMTW98qUqlzuIRnnu/x9OUHoPRaa9IDwpPpemVJKeTnyIG0bdBxJaoEHD1Q5kGMyGihyNOKMnPx0SNsKy94DexZ0uxSa94PIZvW9MqWUOu5pIEepxiprH3x0gbcJI8Caz+GkG2HQJAjVYI5qoHIPwcx7YMP33m2bZkByP7h0GkQ0qb+1KaVUWQc3AB6IacPBPMmuqWogx2I0YHdV7bYNTn4a/PEsLHnDu23zzxDXHsZ8B5HJ9bY0pZRSjai06sknn2TAgAGEhIQQFRXl9za7d+9m5MiRhISEkJCQwL333ovT6ZuyP2/ePHr37o3NZqNdu3ZMnTq19hevVE1zFcGyd32DOCWWvg2Zu+p+TUpVVcoa3yBOib1LYMsvdb8epZQK5MAqMJohqgWH8iW7JrqqzY5NBhxuQ+Msfc7c7RvEKZH6Dyx9R0utlFKqnjWaQI7D4WDUqFHcfPPNfve7XC5GjhyJw+Fg4cKFTJs2jalTp/Loo4+W3mbHjh2MHDmSIUOGsGrVKu68806uv/56fv3117p6GUrVjLzDsGJa4P0rP6q7tShVHUX5sOStwPuXvQN5qXW3HqWUqszeZRDTFkwWDuZ7MBogwla1u5pNRhxYwJFbu2usDSs/CbxvxTT5HKKUUqreNJrSqscffxwgYAbNrFmz2LBhA7/99huJiYn07NmTyZMnc//99/PYY49htVp58803ad26Nc8//zwAnTt3Zv78+bz44osMHz68rl6KUv+exwNOe+D99py6W4tS1eF2Q1FB4P1FBeBpxD0llFLHlj1LIekEAA7muYm2GTAaqlpaZcSBWf4mB0XW5iprniMv8D5noXwOUUopVW8aTUbOkSxatIhu3bqRmJhYum348OFkZ2ezfv360tucccYZPvcbPnw4ixYtCvi4drud7Oxsny+l6l1wFHQ8O/D+HpfX2VLqih6LxwhbGHQfHXh/14sgOKbu1qOqTY9FddzIS4OMHRDXCYB9OR7igqve88ZsNkkgp7B2jpFaPRa7jQq8r+NICIqquedSSilVbcdMICclJcUniAOUfp+SklLpbbKzsyko8H+FeMqUKURGRpZ+NW/evBZWr1Q1WUNh0P1gi6i4r1lfSOxa92uqZXosHkPaDoHYdhW3hyXI9CpTo0kWPS7psaiOG/uWy3/jOwKwN8dNbEg1Aj
kmk5RW1VKWbK0ei4ldIPmkittt4TB4EthCa+65lFJKVVu9BnImTZqEwWCo9GvTpk31uUQeeOABsrKySr/27NlTr+tRqlRMG7hxHvQaC6FxENUShv0HLvv4mBzhrMfiMSSyGYz9XqarRTSVAM7Jt8B1v0F0y/penToCPRbVcWPnfAiJhTC5CLg31129jByTiSKPGey1k5FTq8dieBJc+iEMexKiW8nPodcY+dwR06bmnkcppdRRqdfLnnfffTfjxo2r9DZt2lTtj0VSUhJLly712Xbw4MHSfSX/LdlW9jYREREEBwf7fVybzYbNVsWudkrVJaMRYtvC2f+FIQ+AwQSh8bL9GKTH4jEmMhkG3gd9x4EHCIkBs/77NgZ6LKrjxo4/IKkbGAw43R4O5lWvtMpiMRf3yKmdQE6tH4sRTSTI3m2U9C4LigKr/8/LSiml6la9BnLi4+OJj4+vkcfq378/Tz75JIcOHSIhIQGA2bNnExERQZcuXUpvM3PmTJ/7zZ49m/79+9fIGpSqF5Zg+VKqsTGZILxJfa9CKaUqKsiAA2tgwG0AHMj14PJAfEjVL5aYTSaKarFHTp0wGiE88ci3U0opVacazaX73bt3s2rVKnbv3o3L5WLVqlWsWrWK3FwZ6Ths2DC6dOnCmDFjWL16Nb/++isPP/wwEyZMKL1aMX78eLZv3859993Hpk2beP3115k+fTp33XVXfb40pZRSSinVkOxcAHggqTsAWzNdADQLq05plaFWe+QopZQ6fjWajpKPPvoo06ZNK/2+V69eAMydO5fBgwdjMpmYMWMGN998M/379yc0NJSrr76aJ554ovQ+rVu35qeffuKuu+7ipZdeIjk5mXfffVdHjyullFJKKa8tv0Bk89Kec1sz3NhMEFud0iojkpGjgRyllFI1rNEEcqZOncrUqVMrvU3Lli0rlE6VN3jwYFauXFmDK1NKKaWUUscMtxu2/AotTyndtDXTTbMwA0ZDNTJyjAacmHAVZmOqjXUqpZQ6bjWa0iqllFJKKaVq3YGVkHcImnvHb69PddEsvHofm4OKozcFBfk1uTqllFJKAzlKKaWUUkqV2vA92CIgoTMABUUeNqW5aR9dvY/NNrNk7+TnayBHKaVUzdJAjlJKKaWUUgAeD6z9SsqqjNKBYG2qC6cH2kdXr0DKVpKRk59X06tUSil1nNNAjlJKKaWUUgB7lkL2Pmg9sHTT/L1OwizQIqLq/XHAG8jJy8utyRUqpZRSGshRSimllFIKgJUfQVgiJHYt3TR3t5Nu8aZqNToGb2mV9shRSilV0zSQo5RSSimllD0H1n0N7c4Ag3xEPpjnZm2qm14J1Z87VdLsOL/QAS5nTa5UKaXUcU4DOUoppZRSSq3+HJyFEsgpNnuXE6MBeiVWP5BjMxU3O8YGBek1tkyllFJKAzlKKaWUUur45nbD4teh5QAIjS/dPHtnEZ1jjYRZq1dWBRAkvZIpwAZ5h2tqpUoppZQGcpRSSiml1HFu04+Qvh06n1+6Kb/Iw6J9LnofRVkVgMUIBjzkezSQo5RSqmZpIEcppZRSSh2/XE74fTI07QMJnUs3L9rvxOE+urIqAIPBQJDJIKVVeak1tVqllFIKc30vQCnVCOSnQ84B2LUQrGHQop9M9bCG1vfKlBLOIsjZD/tXyXs1uS9EtYCwhPpemVKqoVvyJqRthXNe9Nn8xx4niSEGkkKrX1ZVwmY2UOAJbZwZOY58yD0Ie5ZII+gWJ0N4UwiNre+VKaXUcU8DOUqpyuUegl8fgrXTvdsMRjj3Jeh6IdjC629tSoEEcfYsgk8vhaIC7/ZmJ8JlH0JE0/pbm1KqYUtZB3OegM7nQWw7n11/7JGx44Zqjh0vK8gM+e5I+VvamDhyYdNM+G48uF3e7Z3Pg7Ofg/DE+lubUkopLa1SSh3Bll99gzgAHjf8cBtk7qmfNSlVVs5++GSUbxAHYN8y+ONZKCqsn3UppRq2zN3yuyOyOfQe47NrZ5abXdkeeiT8u4/KNhMUmMIaX0ZO1j749kbfIA7Axh9gw3fg8dTLspRSSgkN5CilAss9BAv+F3j/ig/rbClKBbTvbxkZ7M/qTyCvkV0JV0rVvoyd8MHZgBuGPAzmIJ/dc3YXYTFC17ij649TIshsINsQLiVKjcma6YGDNQtfbnyvRymljjEayFFKBeZ2Vn4VMWu33Eap+pS9L/A+px1cRXW3FqVUw5e6Fd4/CzwuGD4FQuMq3OTXHU66xhkJNh99WRVAhNVAGtHSg6cxydwVeF/e4YqZOkoppeqUBnKUUoFZw6F5v8D72w8Ho7baUvUs+cTA+6JaaFNupZRX2jaYOhJMFhj+NITGV7jJ/lw3Sw+4ODHp3/99i7AZSPWEQ8YucDr+9ePVmQ7DA+9LPlF/ryqlVD3TQI5SKrCgcDj9ETD6SS0PS4C2p9f9mpQqL7o1NOnhf9+w/0B4Ut2uRynVMKX+I+VUJgsMexJCYvze7LONDmwm6N/s35VVAUTaIM0ZJNk/GTv+9ePVmRb9/TeKNxjhzCcgOKrOl6SUUspLAzlKqcrFtYerf4KELvK9wQDth8E1P0NU8/pdm1Ig01NGfwY9r5ITNJDmpaOmQutB9bo0pVQDsXe5lFOZbRLECY72e7OtGS7eXePgjFbmf11WBRBpNZDmMEm7mdR//vXj1ZnIZBg3EzqeLcEbgPiOMPYHiO9cv2tTSiml48eVUkdgCYaW/eXDmz1bsnOCYyAoor5XppRXZDMY+SwMug/cRWAJhYgm9b0qpVR9KyqExa/DvCkyXnzIQxAUWeFmTreHd9Y4eHWFnfhgAxe2t9TI00fYDBS5DeRY44lI3VIjj1lnYlrDRW9Dfpr0w7NFSDauUkqpeqeBHKVU1YTFy5dSDZUlBKJb1vcqlFL1zeWE3Ytg80yZvlSQDp3Ph95jvVl7ZTjdHm6eVcDvu50Ma2Xmko4WQiz/PhsHpNkxQFpIu8YXyAGwhcuXUkqpBkUDOUoppZRSqvE7uB7+ngprv5LgTUgstDwFOo2EiP9n777jo6jTB45/tvdN74VA6L0jClYQe2+IimIvpx7eefqzt1Pv1PM8O4hdsXdFBUQFadJBektCettsNtt3f38MJITshmIqPO/Xi1eS+c7OfmfJTGae+X6fJyPqy/69xMucvAB3jDAwNOXP58XZW4JJCeTkWQfQdesXEAqBWjIbCCGE+HMkkCOEEEIIITqHoB8CXqViYjgEjgLY8Qus+gAKlihTf7sdDzljlKlUquaDJp9v9vPKKh+T+upaPIgDkGxWYdHBWv1gjqt9FQqWQnYz1SCFEEKIAyCBHCGEEEII0bGEw+BxKJWedi2Dnb9B/hIlcEO48boqDaQPhuPuguyjlCDPPgKhMD/nB5hfEKS0LoReo8LhDTE3L8ixmRpO79Y6l8QqlYqcGDWr6hKUBMurP5BAjhBCiD9NAjkHKRxWLh5qamrauSdCHJ5sNhsq1f5zE8ixKETrarNjMRxWElSHgsrPaq3y7wDeu0232VZCQcIBLw5PAKc3hN5gJN6iR6dpwek4o
YAysgWUESsafdPPZvdn6Pf7qagLEAirsZuM2Ezaxr8X4TD4nKidRaiq86ipKKSi2kG5O8xmt5XNnhiCKj1drUF62Tx0NTpJ0zjQBr0Q8OL0+HHUedB7ykjwFmD0VaEKeCDoRRUOEQ7DLpKYZTiFn0K349ebGB3v5JTEMnJMXjDHEYrrRlhrwuWHqjIvobAXjQrcASiqC7OiNMwX28KU1EGKGRKNEAiDRgWX91ZxfGYIr9fbcp/vPnrGhPl2R4gtfc+n++/Tccf2xN/3fKWC1gGQv4tCdAwHeiwK0RZU4T1nfXFACgoKyMqSkstCtBaHw4Hdvv+KWHIsCtG6WuNY3HCzhV6JLT995XDgCesY5X0BB9b27sphzR52tvl7ujHiVylJlu/RvsO12m8BuH2Wh/8u9u339fJ3UYiO4UCPRSHaggRyDlIoFKKwsJBwOEx2djb5+fkd/oCuqakhKyurU/QVpL+traP390Cfduw5Fvddv6PvX2s7kvdf9r1l9/3PHovt5Uj+PWhJ8jm2jJb4HNv7WJTfhYMjn9fB6UyfV0f5OycEyNSqg6ZWq8nMzKwftmq32zv8SWePztRXkP62ts7W333tORaj6ez792cdyfsv+962+76/Y7G9HMm/By1JPseW0RafY2sfi/K7cHDk8zo48nkJcXCk/qEQQgghhBBCCCFEJyGBHCGEEEIIIYQQQohOQgI5h8hgMPDAAw9gMBxYxYH21Jn6CtLf1tbZ+nuwDvf9258jef9l34/Mfd+XfBYtQz7HlnE4fI6Hwz60Jfm8Do58XkIcGkl2LIQQQgghhBBCCNFJyIgcIYQQQgghhBBCiE5CAjlCCCGEEEIIIYQQnYQEcoQQQgghhBBCCCE6CQnkCCGEEEIIIYQQQnQSEsgRQgghhBBCCCGE6CQkkCOEEEIIIYQQQgjRSUggRwghhBBCCCGEEKKTkEDOQQqHw9TU1BAOh9u7K0Ic0eRYFKJjkGNRiI5BjkUhhDhySCDnIDmdTmJiYnA6ne3dFSGOaHIsCtExyLEoRMcgx6IQQhw5JJAjhBBCCCGEEEII0UlIIEcIIYQQQgghhBCik5BAjhBCCCGEEEIIIUQnIYEcIYQQQgghhBBCiE5CAjlCCCGEEEIIIYQQnYS2vTsgREWtl50VdXy1qhCVCs4alE5WgpkEi6G9uyaEaEVuX4Bih4fv1haTX1XHsT2SGJQVS3qsqb27JoQQh8TjD1Lk8PD92mJ2VroY0z2RIdlxcl4TQgjRoiSQI9pVmdPLA1+u5ds1xfXLZizYwXlDMvi/0/qQaJNgjhCHI48/yLxNZdz87nJCYWXZ+0vyyYg18f51R5Edb27fDgohxEHyBYL8urmcG95ZRnD3ie39Jfmk2o3MvO4ochIt7dxDIYQQhwuZWiXa1bKdVY2COHt8umIXa3Y52qFHQoi2UOr0cuv7K+qDOHvsqnbz6Nd/UOsJtE/HhBDiEJXUeLnlveX1QZw9ims8PPjlOmrc/nbqmRBCiMONBHJEu6lx+5n+67ao7dPnb6PWKxc9QhyOVuVX4w+GI7bNXl9CZZ2vjXskhBB/zh9FNXgDoYhtP28uo0rOa0IIIVqIBHJEu/EHQ9R6oz91d3oCBKLc6AkhOjdHM0+mQ2EIBCPfDAkhREfV3IibcJiowetOIxyGZW+CR0ZMCyFEe5NAjmg3MSYdJ/dNidp+av80bEZdG/ZICNFWhnaJi9rWNdGC1Sgp3IQQncvAzNiobZlxJmyd/bxWtQO+uhW+v6e9eyKEEEc8CeSIdqPVqLlweBZx5qbBmiSrgTMHpaFRq9qhZ0KI1pZqN0QM5KpU8NBZ/Ui2GduhV0IIceiSbQbOHJgWse3hs/qRYu/k57XyTY2/CiGEaDcSyBHtKivezGc3HcN5QzLQa9QYtGouHJbJJzeOJjNOqtYIcbiKtxh49Jz+/N9pfUiyGlCpYEh2LB9dP5phzYzWEUKIjirOouf+M/tx3xl9SLYp57WBmTF8cN1RjOyW0N7d+/PKNihfq3Yq06yEEEK0G1U4LGfig1FTU0NMTAwOhwO73d7e3Tls1PkCOOqUueWxZh0mfScffixanRyLh4dQKExprZdQKIxJryHOrG/vLomDJMeiEI2FQmHKar0EQ2GMOg3xlrY5r7X6sfj5TbDyXeX7O7eDOb7l30MIIcQBkbtl0SGY9VrMErwR4oijVqtI7ezTDYQQYi9qtarzT6OKpHwTWFOgtgTqKiSQI4QQ7UimVgkhhBBCCCGaV1cJMVm7v69o374IIcQRTgI5QgghhBBCiOZ5HGDbncxZAjlCCNGuJJAjhBBCCCGEaJ63RplaBRLIEUKIdiaBHCGEEEIIIUR0AS8EfWC0gcEmgRwhhGhnEsgRQgghhBBCROepUb7qLGCIkUCOEEK0MwnkCCGEEEIIIaLz7g7k6C3KqJy6qvbtjxBCHOEkkCOEEEIIIYSIzlOtfNVbdk+tKm/X7gghxJFOAjlCCCGEEEKI6OqnVplBb5epVUII0c4kkCOEEEIIIYSIbu+pVXqzUopcCCFEu5FAjhBCCCGEECK6vUfk6Mzgq23f/gghxBFOAjlCCCGEEEKI6Lw1oDOBWqN89UogRwgh2pMEcoQQQgghhBDReWqUaVXQMCInHG7fPgkhxBHssAjkvPDCC+Tk5GA0Ghk1ahRLliw5oNfNnDkTlUrFOeec07odFEIIIYQQorPy1YLWpHyvM0M4BP669u2TEEIcwTp9IOeDDz5g6tSpPPDAAyxfvpxBgwYxYcIESktLm33djh07+Nvf/sbYsWPbqKdCCCGEEEJ0Qn43aA3K97rdAR2vs/36I4QQR7hOH8h55plnuPbaa7nqqqvo27cvL7/8MmazmRkzZkR9TTAYZNKkSTz00EN069atDXsrhBBCCCFEJxPwgEavfK8zK18lT44QQrSbTh3I8fl8LFu2jHHjxtUvU6vVjBs3joULF0Z93cMPP0xycjJXX311W3RTCCGEEEKIzstf1xDI0e8J5NS0X3+EEOIIp23vDvwZ5eXlBINBUlJSGi1PSUlhw4YNEV8zf/58XnvtNVauXHlA7+H1evF6vfU/19TIHy0h2oMci0J0DHIsCtExtOmxuPfUKq1MrRJCiPbWqUfkHCyn08nll1/OtGnTSExMPKDXPP7448TExNT/y8rKauVeHp4qar1sKKph7oZS1uxyUFrj+VPbc9T5qHR5CYakYsKRQo7Fw4ejzsfWslrmbihl+c4qiqrdhPepfuL1B6l0eanzBtqplyIaORaFiK7OG2BnhYt5G0tZsKWcgqo6fIFgq7xXmx6LfnfTETk+mVolhBDtRRXe9+q5E/H5fJjNZj7++ONGlacmT55MdXU1X3zxRaP1V65cyZAhQ9BoNPXLQqEQoEzJ2rhxI7m5uY1eE+lpR1ZWFg6HA7vd3gp7dfgprHZzy3vLWZ5XXb8sN8nCa5NHkJNoOahtldZ4WLStgtcX7MATCHLmwHTOGpxOZpy5hXstOho5Fg8PZU4Pj3y9ni9XFdYvS7IamHHlCPql2/EHQ+ysrGPaL9tYVVBN
dryZG4/PpUeyDbtJ1449F3vIsShEZNV1Pt5ZtJNnZ28msPtBk0mn4emLBnF8ryTM+pYdCN+mx+L0cWCMgWNuh6AP3jkPzn0FBl3Ssu8jhBDigHTqqVV6vZ5hw4YxZ86c+kBOKBRizpw53HLLLU3W7927N2vWrGm07N5778XpdPLf//434pMMg8GAwWBolf4fCWrcfu79bE2jIA7A1jIX17z1O+9fO4okm/GAtlXm9HDHh6v4dUt5/bL1RRt5a+FOPrphNFnxEsw5nMmx2Pn5gyHeXrizURAHoKzWy6Tpi/j2trHkV9Zx+WtL6m+CNpXUMnt9KY+c3Y8Lhmdh0mkibVq0ITkWhYhs+c4qnvphU6Nlbn+QW95bzje3jqVPWssGV9r0WPTXgSVZ+V6tA7VWplYJIUQ76vRTq6ZOncq0adN48803Wb9+PTfeeCMul4urrroKgCuuuIK7774bAKPRSP/+/Rv9i42NxWaz0b9/f/R6fXvuymGpwuVl7sayiG1bSmspc3ojtkWyodjZKIizR3GNh7cW7mi1octCiJZR5vTy+oIdEdtqPAFWFzj49/eb6oM4e3vk6/WUH8T5Qggh2lKly8vzP22J2BYKw/uL8wgGQ23cqxbk9zTkyFGpQG+RQI4QQrSjTj0iB+Diiy+mrKyM+++/n+LiYgYPHsysWbPqEyDn5eWhVnf6eFWn5fI2H1ypdPkOaDvBUIj3l+RFbf98RSFXj+lGaow8rReio/IFQjibyXmzvcxFIBT5RscXDLGjwiUj74QQHVKtJ0BBlTtq+/YKF3X+IDZNJ70m9dc1BHJASXgsgRwhhGg3nT6QA3DLLbdEnEoFMG/evGZf+8Ybb7R8h0Q9u1GLVq2K+IQdIMV+YNOqANQqVfRGFTTTKoToAIw6Nck2A6VRRtb0y7Dz+m/bo75eJUe5EKKD0mvV9E61RT+/pdsxdNYgDuxOdrxXIEdnVII7Qggh2kUn/osiOoNEm4FLRkSuojCmewKJ1gOb261Rq5k4Mjtq+/lDM4i3ytQ4ITqyFLuRqSf3jNiWGWeiW5IVkz7yqDqDVk2XBBmNI4TomBKtBq4/LpdIz5xMOg3nDM5A35lzfAU8oN3rOktrlKpVQgjRjiSQI1qVWa/ltnE9mDy6C/rdT6LUKjh9QBr/vnAQcZYDD770TLFxUu/kJssz40xcflQXdJ35SZcQRwCVSsXJfVO457Q+WA0NA0JHdo3j3WtGkR1v5pkLB6PTNL0TevjsfiTZJMGuEKJj0mrUdE008+8LBpG817kqN8nCy5cNJcHWiR82hUJKIGfvETlaI/hkRI4QQrSXw2JqlejYkmxG7jqtN1eP7YrTE8Ci15Jo02M1HFwp4SSbgcfPH8CKvGpeX7Adjz/EWYPTmdAvlYxYUyv1XgjRkuItBq48pgunDUjF4Q5g0KlJsOiJNSs3OYOyYvjutmN587ft+Nm8ewABAABJREFUrMx30CXezHXHdaNrogVjZ36aLYQ47KXHmjm2h5puScOocvnQqlXEWfRkxJhIOMARyB1SwKN81e4byHG1T3+EEEJIIEe0DZNOS3b8n/91S7YZmdAvlaNzEwiGwsSYdKiay50jhOhwdBoNGXFmMuKatum1GronW7nvjL7UeYMY9GpMOvlTJYToHJLtRpJsBhxuPxq1Cpvx4B5adUgRAzkGCeQIIUQ7kqtj0SkdFhdGQoio9FoNeq2MwBFCdD4qlap+lOFhYU9SY81e+6QzgbO4ffojhBBCcuQIIYQQQgghovDvLqu+74gcv4zIEUKI9iKBHCGEEEIIIURk9YEcY8MyrUmmVgkhRDuSQI4QQgghhBAisj2BnEZTqyTZsRBCtCcJ5AghhBBCCCEiC0QI5GiNDQEeIYQQbU4COUIIIYQQQojIAl7la6NAjgmCPgj626dPQghxhJNAjhBCCCGEECKyiIGc3YmPZXqVEEK0CwnkCCGEEEIIISKrD+RoG5bpTMpXCeQIIUS70O5/FSFaRpXLhycQRK9Rk2A17P8FQogOobrOh9sfRKtWk2STY1cIIZpz2F3vBDzKV7WuYZmMyBFCiHYlgRzR6pxuP2sKHfxr1kY2lTjJijNz+/gejOqaQLxFv/8NCCHahcsbYEOxkydnrWftrhrSYoz85cQejO2ReHjcnAghRAuqcftZu8/1zl/H92Rk1/jOfb0T9IJao/zbQ7t7RI5fAjlCCNEeZGqVaFXBYIjZG0q4dNpiVuZXU+cLsrHEyY3vLOet33ZQ5w20dxeFEBGEw2EWbq3g/Jd+Y8n2Kup8QbaWubj9g5X8b+4WajyS4FIIIfYIBEP8+EfT650b3lnGO4s6+fVOwNs4Pw4o5cdBRuQIIUQ7kUCOaFUlTi8PfvlHxLbnf9pCea23jXskhDgQJTVe7v18bcS2NxfuoEKOXSGEqFfq9PLQ1+sitj03ZwvlLl8b96gFBTxNAznaPYGcurbvjxBCCAnkiNZVVefD4Y785D4QClNQ7W7jHgkhDkSNx09xjSdiWzgMG4qcbdwjIYTouKrqfNS4I4+6CYTCFFZ14uudgA80usbL6gM5tW3fHyGEEBLIEa1Lq27+V8yo1TTbLoRoH1q1qtl2s0GOXSGE2GN/50yDrhNfcjc3IscvI3KEEKI9SLJj0ariLXpykyxsLWs6h9pu0pIaYzz0jbsqwF0BwQAYY8CWBvsJHAkhDkycWc/AzBhWFziatBl1anKTrO3Qq334XFBbqtxI6K1gS2m4uRBCiDYUZ9HTNdGC0+Pn1tEJHJ2uQhX0U+wzMm1lHSn2TnxuCngbV6wCJfGxRi9Tq4QQop1IIEe0qiSbgecuGcLFry6idq9EfzqNihcvHUbyoZYyLt0An98IhcuVn60pcOq/IPckMNpaoOdCHNniLHqevnAQF76ykOq6humRahX8b+LQQz92W0pNIcx+CNZ+BKGgEsAZeS0cfStYk9u3b0KII06yzcjLlw3D6txG2rw7UP+6FIBcSxIjx/8Ttb43YGrfTh6qYIRkx6Ccd6VqlRBCtAsJ5IhW1yfNzne3jeXH9SUs2V5J3zQbZw7KICPWiFZzCCNoqvPg9VPAXdWwrLYEPpoMV34DOWNarvNCHMG6J1v5+i9jmLexjPlbyumeZOXcIRmkx5nQt+e0yLpK+PIvsGV2w7KAB377nzJCb9wDoOukN0xCiE6rp7Ea1dvngau8YaGrDN3n18Lln0Huie3XuT8j4G2aIweUQI5UrRJCiHYhgRzR6tRqFVnxZqYc05UrR+eg3s888v3a9nPjIM7efrwfJn0M5vg/9x5CCFQqFZlxZi47qguXjsz+88duS3GVNQ7i7O331+CoGyAup027JIQQqryFjYM4e/vxfkgdCJbEtu1US4g0tQqUEuQytUoIIdqFBHJEmzqUG8FKl5eKWh8uX4C0GBPJ238m6laK14C/E1eGEKKDaq8gTnmtl4paL25/kDiznkSrAUvNrugvCPrA0zSvjxBCtLod86O3laxVRg52RgFP9BE5MrVKCCHahQRyRIeWX1nHrTNXsCKvGoBRXeN5vXsPzNFeEJM
Javm1FuJwsK2slhveWcamEqW8rUatYtLILB4YEU+zE7t0ljbpnxBC7C2Y0CP6ucme0XmvTwLeyH3XGmRqlRBCtBMp8SM6rDKnl2vf+r0+iAOwZEclpVmnRr8YOvbvSuUaIUSnVuRwM2n64vogDkAwFOatRXls99ogITfyC3PHgSWpjXophBCKYoebXSknRh65AoTH3KEUZuiMAs0kO5apVUII0S4kkCM6rNIaDxuKnY2WhcPwzwVOys98C/R7PXVXqWDkddBjfBv3UgjRGraXuShyRJ6GcMNn+QQu/gDiujZuSBsMZz4LpphW758QQuxtZ0UdD/5cTdlZ74Le2qitbsAVeLqfplyrdEYBTzOBHBmRI4QQ7aGTjvEUnZnbH6TY4eGnDaUUOdyM7ZFEr1QbKXZjo/UKo9zE/bCxmlpfDK9duwBTzU7lIiK5F1iSwWhvi10QQhyEQDBEkcPDwm0VbC5xMiQ7jsFZsaTHRq8staWstpk2FwWadHKmfAeOXUop8rguYEuT0uNCiHaxrdzF3M3VfNurF+dM+RVV5TbwOtGk9GG900is30L39u7koQp4IheR0BrAXd3m3RFCCCGBHNHGPP4gP20o5Zb3lhMKK8um/bqd7slW3poystGNXXqMMcpWYMnOGsrUQ8jO7Rp1HSFE+wuFwqwucDBp+mLc/uDupdtJtOqZed1ouidbI76ue1Lk5QA2gxadRqUEbmxprdBrIYQ4ON0SLdx8Qne2VngY9PVObAYtBp2VCtdWxnZP5LFzO/GUz2C0qVUm8DeTfF4IIUSrkalVok2V1Hj4y/sr6oM4e2wpreXZHzfh9gXqlyXbjfROtUXczvlDM0i0Glqzq0KIFlBc4+Gat37fK4ijKK/1cdvMFVS6vBFf1zXRQqo9cjB3ypiuJNuiB3qFEKKtdUkw0z/DzlsLdwLg9AYor/URDsMvm8tZuK2ynXv4JwS8UapWGSRHjhBCtBMJ5Ig2tWhbBcF9ozi7fb6ykAqXr/7nJJuBaVcMZ3BWbP0ylQrOHJjG1JN7YTbIgDIhOrriGg+Vex3Xe1tXWBO1LS3WxHvXjqJnSsPIHI1axeVHZXP5UV3QaeTPlxCi40iwGvh+bXHU9um/bqO8NnLgusOTHDlCCNHhyJ2wiCoQDFHq9NZfeCRYDaTYDGj/xA1UtJs2AF8w1CTIkxVvZsaVw6mo9eHyBogx60m06rEZI1eFEELsny8QpNTppaLWh0atIsGqJ8VmRK1u+UScLk+g2XZfIBS1rVuSlfeuOYoKlxe3P0icWU+i1YBFgrhCiA4mEAo1e43jcPujPsjq8AK+yCNydCbwy4gcIYRoD3I1LCKq8wWYv7mcv328ihq3ciNmN2n51/kDObZH0iGPhjmqW0LUtp4p1og3aPEWA/EWmUYlREtwuP18vaqQR79ZXz/dKclq4H+XDmFol1j0Gk2Lvl9WvBmVSqk4ty+bQUuMqfmgbKLNQKJNjn8hRMemVakZ1zeFXzaXR2w/rkcSdmMnvewOekEdZWqV3w2hEKhllKQQQrQlOeuKiHZW1HH9O8vqgzgANe4AN767nB0Vhz6MNivOzIicuIhtD57ZT/LeCNHK1u5ycM/naxvlrCmr9XLFa0sorIpcKe7PSLDomTQqO2Lb3yf0IjlKHhwhhOhMCh1u0mKMEXN7mXQabjg+F5O+kwZyoubIMQFhCLjbvEtCCHGkk0COaMLjD/LKL1sjPkEPh+Hln7fh9jc/XSKaRJuB5y8dys3H52LbPfqmf4adD64/qlEuHCFEy6uu8/H0jxsjtvmCIT5dvotwpAP/T7CZdNw+ricPnNmXpN0ja7okmHnh0iGcNThdct0IIQ4LX60q5IEv1vHk+QM4c2AaOo0KlQrG9khk+uThxJo76ZTwUAhCgcg5cnS7g1aSJ0cIIdrcYXEF/cILL5CTk4PRaGTUqFEsWbIk6rqffvopw4cPJzY2FovFwuDBg3n77bfbsLcdX50vyKbi2qjtm0qcuH3BqO37k2I3cvv4nvzw12OZ/48TeHPKSEZ1TZDkxUK0Mo8/yPay6Bfca3Y58AdbPodDotXA5NE5fH3LGOb/4wQ+umE0pw9MJ9Yc4cZACCE6mUAoxOoCB4UOD9e9vQyDTsN/LhrMC5cOpXeqnTs/Xo23mXxgHVpwd4LmiFOrTMpXX/RrRiGEEK2j0985f/DBB0ydOpWXX36ZUaNG8eyzzzJhwgQ2btxIcnJyk/Xj4+O555576N27N3q9nq+//pqrrrqK5ORkJkyY0A570PGY9Rp6pVr5o6gmYnuvFBsm/Z/Lo6HTqEmLNf2pbQghDo5Rp6FbkpVlO6sitg/IjEGnafmExwBqtYqUGJlGJYQ4/GjVagZlxfLDHyV4AyE+XlbAx8sK6tsHZsZg1LZs/rE2E9gdyNFEuGWQETlCCNFuOn0g55lnnuHaa6/lqquuAuDll1/mm2++YcaMGdx1111N1j/++OMb/Xzbbbfx5ptvMn/+fAnk7GbUabju2Fw+X1nYZHqVSgXXH5eLSdcxf3X8wSAlNV48/iBGnYYkmwFDZ714EqKFxZr13DG+J5dOX9ykTa9Rc+6QDFSq1gnktIfqOh9VdT5CIbCbdPVTu4QQoqWdMSCN5+ZsJjXGyBWju5ARqyR631DsZFROPHGWTjoCMehXvjY7IkcCOUII0dY65t34AfL5fCxbtoy77767fplarWbcuHEsXLhwv68Ph8PMnTuXjRs38uSTT0Zcx+v14vV663+uqYk8SuVw0yXBzKuXD+fvH6+iuk75Ix5j0vGvCwaSk2hu595FVub08s6inUz/dRsuXxCjTs2lI7O54bhcSah6GDhSj8WW1i/DzuPnDeDRr//AtXuKZLLNwPOXDiEj7vA4TsLhMJtLa7n70zX1o49ykyw8du4ABmfFYtRJcPfPkGNRiKYy4kx8euNo8qs8PP3DRjaXKtONhmTFMr5PMqFQGLW6ZQPlbXIs7plaFbH8+J4ROTK1Sggh2lqnDuSUl5cTDAZJSUlptDwlJYUNGzZEfZ3D4SAjIwOv14tGo+HFF19k/PjxEdd9/PHHeeihh1q0352BWa/lhF5JfHvrWCpcPsLhMIlWA8k2A9oOmJzU7Q/wys9bmT5/e/0yjz/EjAU7KK/18eg5/bHvp8yx6NiO1GOxpcWY9Jw/NIOxPRKprPWh0ahIsOhJsRsPm9E4BVVuLnj5t0ZV97aWuZg0fTFf3TKGvun2duxd5yfHohBNaTVqdFoNt7y3nECoYTjzivxqLnplEd/eNobseEuLvmebHIuBA8mRIyNyhBCirXW8O/I2YLPZWLlyJUuXLuWxxx5j6tSpzJs3L+K6d999Nw6Ho/5ffn5+23a2HWk1atJjTQzIiGFgZizpsaZWCeIEgiF2VbtZXVDN6oJqCqvdBIIHlxSwrMbLmwt3RGz7clUh5bXeiG2i8ziSj8WWptdqyIwzMzArln7pMaTGmFo9iFNR62VTiZMVeVVsL3fh9Phb7b2+X1fcKIizRzAU5tnZm6j1tt57HwnkWBSiqTpfgBd+2tIoiLNHrTfA5y
sKCUZo+zPa5FgM+pSvEcuP756uKoEcIYRoc516RE5iYiIajYaSkpJGy0tKSkhNTY36OrVaTffu3QEYPHgw69ev5/HHH2+SPwfAYDBgMEhehdbi8gb4eVMZd32ymhqPcuMVY9Lx1IWDOKZ7Amb9gf2KOtz+ZqvtlDm9dEuytkifRfuQY7Hzyqus45b3lrO6wAEoubbOHJjGPaf3JaWFpz16/EHmby6P2r48r4paTxCrQUboHSo5FoVoqtYTiJpIHmDBlnKmjMlp0XNPmxyLgWamVml0ykgdCeQIIUSb69QjcvR6PcOGDWPOnDn1y0KhEHPmzGH06NEHvJ1QKNRojrFoO9vLXdz07vL6IA4oQZnr3v6dvIq6A97O/qpo2YydOmYpRKdV6vRw9RtL64M4AOEwfLmqiGd/3ESdr+nImT9Dp1aRGRe9Il6K3Yhee3hMIRNCdBx6rZpkW/TAdHqcCb2mE+bn2jMiRx3lOkpnkhw5QgjRDjp1IAdg6tSpTJs2jTfffJP169dz44034nK56qtYXXHFFY2SIT/++OP8+OOPbNu2jfXr1/P000/z9ttvc9lll7XXLhyx6nwBXpq3NWJbOAzTft2ONxA8oG3FWwwM7RIbsS0nwUyiVZ4eC9EeShze+qSf+/p4eUGLT3vUaNRMOqpL1Pabju9OvEXOB0KIlqVWwaRR2VHbLxiaiU7TCYPIzY3Igd2BHBmRI4QQba3TD1O4+OKLKSsr4/7776e4uJjBgwcza9as+gTIeXl5qNUN8SqXy8VNN91EQUEBJpOJ3r17884773DxxRe31y4csdy+IJtKnFHbN5U4cXuDB1Q+PN6i59mLhzB5xhK2lzdcUKTajUyfPEKqVgnRTgod7qht/mAYl/fAgrUHIyvezL8uGMj/fbqmUb6KyaO7cFS3+BZ/PyGEqPMFcbj9XDYqm3cW59Uv16hV3D6uBxuLaxieE3dA1zQdSv2InCiBHK1RAjlCCNEOOn0gB+CWW27hlltuidi2bxLjRx99lEcffbQNeiX2x6TX0CvVFvVpfa9UGybDgV/wZMebmXndUeRX1rGtzEV2vJmcRDOpMdGnWQghWld6TPQgqk6jwnIQx/iBshq0nDkwjaO6xrNmlwNvIMSgzFiSbHrsJn2Lv58QQhh1Gr5dW0TfNDuvXzmCzaW16DQqchIsfLq8gJxEC/oOWPVzv5pLdgwSyBFCiHZyWARyROdk1mu54bhcvllTRHifPMVqFVwztutBP7lKsRtJsRsZniNP3YXoCFJjjPRMsbKppGnA9sJhmSTZWmeak0mvJTtBS3ZCy5b7FUKISGLNeu4Y34uJ0xbxzqKdZMSZCIVgV7UbvUbN938d2+rVAVvFfqdWSSBHCCHaQyd8NCAOJ90SLbw0aSgxpoYLhFizjlevGE6XeHM79kwI0RKSbEZemzyCwVmx9ctUKjh7cDq3j+uJSSfPE4QQh4d+6XaeOG8AJp2G/Eo3u6rdJNsMvH31SDKaScLeoe0v2bHGIMmOhRCiHcgVtGgXgWCIslovoTCMyInn+9uPpdTpQaVSkWjRk2w3olF3widXQoh6To+fGk8ArVrFjMnDqazzUesNEGvSk2DVYzNKCXAhxOHDbtJx3tAMxnRPxO0PolKpiDFpSbQaOudoHGgYkRMtR47OBN7o+Q6FEEK0DgnkiDZX4vDw7pI83vhtOzXuAIMyY7jvjL70Tbdj1suvpBCdXSAYYmuZi8e/Xc/Pm8vQa9RcMDSTG0/IpXuyrb27J4QQrcLtD7KxqIZHvlnPsp1V2AxaLh/dhStGd+m8+fqCXlBpQB1lqrvODDW72rZPQggh2m9q1datW7n33nuZOHEipaWlAHz33XesW7euvbok2kC508vtH6zguTmbqXEHAFhV4ODCVxayusDRzr0TQrSEHRV1nP3CfOZtKiMcBm8gxLtL8pg4bRGF1dGrWAkhRGe2vrCG8176jWU7qwBwegO8OG8rN7yznDKnp517d4gCvuj5cUBG5AghRDtpl0DOzz//zIABA1i8eDGffvoptbXK3NpVq1bxwAMPtEeXRBspqKpj4bbKJsvDYXjgi3WU13rboVdCiJZS5w3w3JxNePyhJm35lW5+39H0+BdCiM6u0uXjwa/WEQo3bVuZX8328k6aEDjo3X8gR3LkCCFEm2uXQM5dd93Fo48+yo8//ohe31AK9sQTT2TRokXt0SXRRhZuq4jatrHEicsbOKjt1XkD5FXWsXh7BavyqymqdhOKdBUlhGgTNR4/v2wuj9r+1eoiAsGmQZ79KXN6WV9Uw6KtFWwtraW6zvdnuimEEC3K5Q00O7L4541lbdibFhT0R8+PA8rUKgnkCCFEm2uXhCRr1qzhvffea7I8OTmZ8vLoNwCi84u36KO26TSqg0pwXOny8sZvO3nxpy0EdgdvEix6Xr58GIMzY9FppSibEG1NrVZhM2qprvNHbI+36FEfZNLPHeUurnv790YlzE/qncxj5w4gNcb4p/orhBAtQaNWYdSpI45GBIi3Rr/+6dACBzAix++GYAA0kudQCCHaSrvc6cbGxlJUVNRk+YoVK8jIyGiHHonWVljt5vt1xfRMsREtVnPWoHQSDuJC59fN5Tw3Z3N9EAegwuXjsumLKXJ00rnoQnRyMUYtk0fnRG2fNCob9UEEbEudHqa8sbRREAdgzoZSnpy1/qBH8QkhRGuINes4b0hmxDaVCo7tkdTGPWoh+51aZVa+yqgcIYRoU+0SOr/kkkv4xz/+wUcffYRKpSIUCrFgwQL+9re/ccUVV7RHl0Qr2lpayyWvLqKs1svJfVN44Mx+PPjVOsJ7zYDqlmhh6sm9MOmi/0rWOqrQesoJuR1oTTFUVboxaNV4A42ffnkDIWavL2HKmK6ttUtCiAg8/iA/byonxW5kVNd4Fm9vnA/n1hO7kx1vPqhtFjs8bNudW2JYdgx/H20nTV8HqNjh1lBV58NikKfAQoj2VV3n56oxOaSaQ4zvosYSdBLUmvh5VxhLTBK+Q5hS2iEEfBGnVq0rD/LUEg9P9rKSDErCY1NsW/dOCCGOWO1y9fvPf/6Tm2++maysLILBIH379iUYDHLppZdy7733tkeXRCupqPVy68wVlO1OYvzDHyVo1CpmTB7Bsp2V1HqDHNcriT6p9manSHirdmH44f/QbfhCyYysUjGpxxkMvfQeLnp/R5OhzOsKpQKWEG2ttMbDze8uR6NWcfdpfZg0KpvF2ysx6jSM65NMzxQbseaDm15QUqOMrrtkcAJ/715Iwo9XQJ2Sa6uLPQNf3Ms49cOwWSwtvj9CCHGgAqEQnqpibgrPRPvxNCW3DJCTdRRl45+jLGxv5x4eoggjckLhMHf/4mZ1WYg7fem8HgaVjMgRQog21S6BHL1ez7Rp07jvvvtYu3YttbW1DBkyhB49erRHd0QrqnT5WFdY02jZd2uLmbWumMGZsTx10SByk6zNbqOuphL9rDvRbvy6YWE4jG7TV/QJevnrMXfx+LziRq8Z1iWuxfZBCHFgFm2vJBAKEwiFefDLdcSadfRPj8EfDDFzS
T4//PXYg95meqwJu0nLbYMh4YMpNBrKV7ML/XvnEbxmAVh6tdyOCCHEQdITpOv299EufrHRclX+IpK/nETo3I+BmPbp3J8R8IG68e3C/IIgq8tCnNJVy6ztRjbpM+nllUCOEEK0pXbNBpudnc1pp53GRRddJEGcDsLjD7Krqo7t5S5Ka/58nhlfIPJQ4nAYVuRXU3wAuWy07nK0m76J2Kbb+gMnd9U0WmYzaBnbWeeiCxFBicPD9nIXBVV1UY+pjqBi98i7Parr/MzfUs7i7ZW4fAECoYPve4rNyO3HppO47NnGQZw9gn5Uy16n1i15sYQQ7ScmWIF1+SuRG8s3Y/UUdejzd1RBb5OpVWvKglh0cElvHTp1mPmh/uCtibIBIYQQraFdRuRMmTKl2fYZM2a0UU/E3oocbp6fu4WPlxXgDYTIjjdz7+l9OKpbAnZTM4numhFj1mHRa3D5gk3aVCrIiDXtfyMeR+QbuN2MAWf9910TLTx/6ZAD264QHVx1nY9fNpfz5Hcb2FXtxqTTcMnILG44LpcUe8er1jSya0LUtu7JViz6g/+Tk2gzcNHAeHSr10ddx1i6klpfHZg63mcihDgyaALuZhP+6qq24sscjr6zVdQMeJtUo1pbHqSLXY1Bq6J3nIr5VQO4WqZWCSFEm2qXQE5VVVWjn/1+P2vXrqW6upoTTzyxPbrUqVS5fBQ63Hy3pphQOMwp/VPJjDURbzUc8jbLnB5ufGc5K/Or65flVdZx3dvLeOXyYUzol3pI202xG/jr+J48+k3Tm7BJI7MPqEqVymhrtj0xIZEvbx6ATqsm0aonySY3c6JjcnkDlNR4+PGPEspqvRzfK5meyVaSIwRlQqEwczeUMvXDVfXL3P4gry/YwaZiJ89dOoQEy6Ef860hO97M8C5x/L6zqknbg2f2JdF2aP01m62E47qiqtoRsT2Q0IuwVo57IUT7KKnxoA3qSNAalMBHBOHYLs0WdOiwgv4mI3LWVQTpl6CMhu6XqOWLij743U4O7ZGfEEKIQ9Euf1E+++yzJstCoRA33ngjubm57dCjzqOi1sszP27i3cV59ctenLeVswelc98Zh36jlF/lbhTE2dsjX//B4KzYQxoBoNNoOH9oJolWA0/9sJGCKjcJFj03Hp/LOUMysBl1hMNhSmq8VNb5UAHxFn2j9wqbEwnljEW949cm2w9nHcVml5EHv1+H1ahlyjFd6Z+hIvFPBLWEaA113gCz1hbzt49X1Q8wm/7rdvqk2ZgxeQRp+4wiK3F6ePzbDRG3tWBrBcUOT4cL5CTZDDx/6VBmzN/Gu4vzcPmC9Emzcd8ZfemXaie/sg6H249ZryHeoj/gxMdqUwyBsXei3fZT00aVmqr+V/LX91dz0YhsRnWN75CjlYQQh69NJU4WbnTxtyGTUS99tekK9gyIy0GjVrV95/6sQONkx05fmLyaMKd1U0YW9UrQ4MbIlvIq+rRXH4UQ4gjUYR4NqNVqpk6dyvHHH8+dd97Z3t3psP4oqmkUxNnji1WFnDYw7ZBHziyP8AR9j4IqN3URpkbtKxAMUe7yEQ6HiTHqMO8uCRxn0XPOkAyOzk3AGwih06hIthlRq1V4/EGW7qjkbx+toqRGeYqVHmPk6YsGMTQ7DoNOg96WiP+sF+Gza1HnL6p/v3DGcLYf9yyXvL2JGk8AgF82lXPu4HTuPaMvCRLMER1IidPTKIizx/oiJ6/8spX/O61voyH3Lm+gvtpbJH8U1tAvvWMkzqx0efEGQhg0alJjjPxtQi+uOqYrgVAYs15DKAwv/LyVNxbsqC/Be0xuAk9eMJDMuAMrR65O6UvgtGfQ/vB/ENidD8dgp3z8szy5yMP8LZXM31LBsC5xvDhpqARzhBBtZsGWcnqkxePtehs6ZwnaDV80NCbk4rvwPZy6JDrlWSnohb1GPG6sVK4Hc+zK36vs3V//qAhKIEcIIdpQhwnkAGzdupVAINDe3eiwXN4A0+dvj9r+6i/bOKpbAjGHkM+muZsevUaNTtP8U6Qih5v3F+fx3pI83L4gJ/RO5q/jepKTaKl/AhVp+sjOijqufH0pwVDD3W2hw8Plry1h1u1j6Z6sTKvSxWcTuOgdQq5ywq5SVJZEVlbqmfLutvogzh6frSzkiqNzJJAjOpQ560ujpnqauTSf647NJX2vUTl6rRq1CkJRXnOoo+9aUnWdj6U7qnjmx41sK3PRPdnK307uxZDs2PoRRr5AkOfnbuHVX7Y1eu2CrRVc8+bvvHX1SJIPYDqk2hyLesgkArkngbMIbxA21Vl56jcHC7ZX1q+3bGcVi7dXctag9JbdWSGEiKJbopWRXeO5Z/ZmusfcxmkT78DgKSOot7PWoeebn+qYOj5ApyzDEPCCoWGKe4FT+aOUYlGu7cw6FanqatZVqjm/XToohBBHpnYJ5EydOrXRz+FwmKKiIr755hsmT57cHl3qFAKhMDV1/qjtDrefQPDQKiIMzorFoFXjjVBR4dwh6SRYok+BKHa4mfLGUtYXNSQd/np1EXM3lPLVLWPITY5cXtzjD/LKz1sbBXH2CITCzJi/gwfP6lc/SkFrSwJbEtCHIoebK2b+HHWk0KfLCxiSfRAlyGvLIORXnjqZ4w/8dUIcoEqXL2qbxx9qchzEW/Sc0j+Vb9cUN1nfotfQI8px1VY8/iAfLytolP9qXWENV72xlIfO6sfEkVnotRpKnd6oAegNxU6Kqj0HFMgBQGdEm5BDpTGNyTOWsGZX09GJAO8s2skJvZKwGSMEtb214HcDYbAmH9j7CiFEM0Z0jWNbmYvPV+yiS4KFdHsivdJyya/288Svu9hRUcf1x3bS1AGBxlWrilwhLDowahse8HXRVrOupvl8hkIIIVpWuwRyVqxY0ehntVpNUlISTz/99H4rWh3JrAYt4/umsCJKLptxfZKxmw7tvzQlxsCMK0cw5Y2ljYI5AzLs3D6uJ6Zmqs2s2VXTKIizR50vyHNzN/P4uQPqp1k1bg+wttDRzHYdlDk9pMWYUO87rzwMoX2GN6hVkGAx4A0EaRTPqqsCRz6s+Qh8ddD/XEjsAdYUcJXDtnnw85PgKICU/jDuQUgdAEZ71L4JcbCO65nEi/O2RmwblBmDZZ9jxGrQcc9pfdhSWsumkoZqICadhjeuGklqO08dKnN6+ff3GyO2PfHdBk7qk0xmnJk6X7DZqZnbympJsxsJqyDRajigHBLhsBLsjSa0b1s4DFU7lGO9YAnE5kDWSCheBWlDwJK43/cUQohoFm6tINas5+EJmVzUHfQVa2H1L/QxxXLSxPNZWJZMVV30YH6HFvQ1ypFTVBsiwdT4PN1FX8N3rnTC4TAqVSfMAySEEJ1QuwRyfvopQsJKsV8atYozB6Xz2vztVOzzdN9u1HLpyGx0Gs0hbVuv0TAiJ47ZU49jRV4VxTUehmbH0SXB3GwVqFAozBcrd0Vtn7u+FMep/oiBHKNOQ5d4S6Ob1L1lxpmY9ss20mJNnDs0o9FT+1iLjjMGpvPxsgLUKvjrmBTO6q7DUL2JsN6GPcMKAT/4nLDg
WVjw34YN/z4duoyB816Bxa/Cb3u1FSyBN06DC9+APmeDupOVCRUdVtdECwMz7KzeVdNouVoFD5zZj/gIo94y4sy8c/Uotle4WJlXTUaciUGZsaTFGNFq2vd3s7zWG3EEHyjVtSpqfWTGmTHpNOg0KvzByIEXk17DSc/MQ6fVMOWYHC4anhVxGube4i16LhiWxSNf/9GkrUeyhZfOSsFWshRcZZDUF4IeeOMM8FQ3rKg1wnmvwqqZMHQy7Kc6nhBCRFLr9fPlqkLundCVCfEhNF/eDCXr6ts1C1/gmHEPUZ09sR17+ScEGyc7LqwNE2/cJ5BjrMNZq6fI4Wk0RVgIIUTr6VA5csT+ZcWb+eTGo3nmx018u6aIUDjMyX1T+fspvciKP7CkodHotRqy4s0HtR21WoXdGP3XyKTXoI7ydMas13LTCbn8uL4kYvu5QzL46wcrcfmUhMj/umAg8bur9Jh0Wm45sTtzN5Ty5IRUxuS9hOn9d/faGStMfB+0psZBnD12zof1X0Lewsgd//bvkDkSYjKi7psQByPZbuSVK4bzxoIdvLNoJy5fkCHZsdx3el/6pEUPIiTbjSTbjYzqmtCGvd0/3X4CSdrdebUSbXouGJrJ+0vzm6yTHmPE5Q3i9AbBG+SpHzaxcGsl/504uNnKcyqVitP6p/Luop1sK3fVL++dYuGdMywkvjdBCeIAnPI4LHujcRAHlITJX/9Vaa8rk0COEOKQaNUqLAYtWQYvmt/fahTE2UM1+wFiu48DUtq+g3/WvlOrakOkWhqf/zNNysPFzaW1EsgRQog20maBnCFDhhzwcMvly5e3cm86t5xEC0+cN4C7Tu1NGIgxabEaDj7BcUu5ZEQ27y1pepMGcNlRXZq9IctNtvLk+QO4/4t19U/3jTo1/zilNz+uL8G1e0rG7PWlFDs89YEcgJwEC7NuHUP8+rfRrn238YZ9tcrNm7qZ8sZLpsHwKcoonH25ysBdJYEc0aLSYkz8bUJPrjw6h1A4jNmgJe4AS3B3NIlWPck2A6XOppW10mOMJO4VdL19fE/Ka32NgrZdEsw8cnZ//u+zNY1eu2BrObuq3PXnjUqXj/JaL4XVbhIsepLtRlLsRtJiTbxzzSi+W1vMR7/no1apeOv8dBLfPUk5dveI7QJlkaeAUVehBHvLNkN8tz/5iQghjkRGnZYrj87BHC6D1R9GX3H9F5Dav+061lL2nVrlCtM3sfH1fKIxjAE/m0ucHNezU6Z0FkKITqfNAjnnnHNOW73VEcFs0EacrtQeshPMXDe2G6/+2rgqTf90OxcNz2w254XdqOOcwRkc3T2RvIo6ymu9mHQa3lucx7xNZY3WXbqjir57lVuuqPVSXVZA8m//ibxxjb7xDd2+vDWga2b0kaZjfL7i8KLTaOorOnVmKXYjL102lEunLW40xcqk0/DCpKGkxBgbrfvvCwdSXuuj2OHGatTxR6GDuz9dw65qd5NtL9hSzqCsWIodbv7+8Wp+3Vxe39YlwcyMySPITbaSHmtiyjE5nDM4HbUK4gp/aXrMh/ZTCTHglgTnQog/pU+aHU1dhfIQKZrmrkc6sqCvfkSOJxCm0hNukiNHrTORoa5kc5Sp8kIIIVpem92pPvDAA231VqKNxZr13HRCLmcOSuODpfnUePycOySTvun2Zsua72HQaciKM6MKw9VvLsXjj5x3Y9+y6j9vKiNH54WawsgbLlwOo26ATbMit3cfD8VrIrelDgBTx5rKIkRHolKpGJQZyw9/PZbv1xWzKt/BkOxYTu6bEnFofaxZT6xZT/dkKwVVdTzw5bqoeXNizTpc3gBPfLehURAHYGdFHZNfX8LHNxxNaowRlUpFwp5Rf9URRgaGQ0rpXG/ThOyoNWCKh9jsg95/IYTYI9FqIOQzQM4Y2DE/8ko9Tm7bTrWUgK/+wVaxSzlnJ+yTIwedkXTK2FQS4TwrhBCiVUgmV9EiYs16BmTG8ui5A/jvJUM4oXfyAQVxGm3DouO4npHLAQ/IsDM6tyGwUub08t85m9lU4VcqTUVSthEyhkeeMqEzw9ip0PcsUO8TzzTGwrmvgFWGBwvRHK1GTZcEC9cdm8vzlw7hmrHdyE6w7DcRc6JVz9TxPYk1N50SqlLB0bmJVNR6+Wp1UcTXF1S5Kav1NG1I7dd02Yq3lWM9kmFXKYEca2qz/RVCiP2pDZvhpPsbTUOqlzaYsK0TTtUOhSDkrx+RU+xSHrTF7RvI0RrIpIQtpU7C4egVBYUQQrScdpk7EgwG+c9//sOHH35IXl4ePl/jCkyVlZXt0S3xJ7h9AcqcXpbnVePyBhieE0eyzUhchEo80VgNOu45vQ/rCh0UVCnTLUZ2jeeG43LZWeHipXlbGZ4Tx9DsOFQoN3MvLoFxJ99L4ueXRNhgMpgT4Iov4bfnYeXbStK+nqcoF1tx3SAmG25aDGs/htL1kDMWeoyXJ/RCHISSGg87K1xsKHbSJd5MjxQbabtHy+zN4fZR7PDy3Zoiarx+HjtnAOW1Xv757Xq8gRAqFfz7gkEk2w3kVdYRjFBiPN6i5/kzUunhWAgrZ4M9A/qcoXyN7QLJfaF0r2pWW+ZAygC44HX4+Uko2wBxOXD0XyD3JLClgbZz5ikSQnQcayvVDNFrMV35Hcx9BHb8CgY7DLsSeozHo7Xx50pStIOQX/m6OzhV6VHOyTGGfUfkmMhQleP0Bil1eg/6QZ4QQoiD1y6BnIceeojp06dzxx13cO+993LPPfewY8cOPv/8c+6///726NJhp9YTwO0PYNG3fi4dlzfA9+uK+fvHqxvdeJ0zOJ17Tu9Lks1AUbWbjSVO1u5ykJtkZUBmDOkxJtT75M/Jjjfz0fWjWVlQzeYSJwMzY7n+7WX1OTje+G0HiVY9H90wmt6pNtYX1RCI70n49GdQzXu8oVJN1kg45Qkw2pVpFSc/DGNug3C4YRkow4UTu8PxdylPnqTcuGgjoVCYqjoliB1v0R9wMviOJq+yjiteW8yOirr6Zck2AzOvO4o6X5BfN5dh1ms5rmcSX6zcxX9mb65fbwY7OKpbPG9NGcmibRWcPjCdtBgjZr0Wi16LUaduNNXSoFUz8+JMen4/CSq2NnRi7sNw7qvQ+3S49EP49g7Y/INyvBvsYEmCbicogdqgT5lSZZNROEKIlpMRoyFc64fSddDvXGUkoLcW8hfhNybg0CZ1vkBOYHcy+92BnAp3GI0KLPsOOtIqgRyALaW1EsgRQog20C6BnHfffZdp06Zx+umn8+CDDzJx4kRyc3MZOHAgixYt4tZbb22Pbh0WatxK1YD//bSFHeUu+qTZueWE7nRNsmDWt85/965qN1M/XNVk+ecrCzmuZxKDsmKZOG0RJTUN1W3sRi3vXXsU/dLtTW5g02JNpMWaGJgZy6n//aVRIlWA8lofT363kb+d3IuZi7dhXfMmquIlcPIjSgUajRaKVsN7F8HkbyC5N2gNYE9vfkckiCPaSGG1m69WFfLxsgLUKhUTR2ZxSv9UUmM6VxLk6jofd368qlEQB+DOU3rx6NfrmbuxFFA
SIKfGGBsFcfZYtK2SdYU13HpSj0bnghS7kauOzuGlnxuSqF84KIHsVc82DuKAErD57Hr4yzJlKuV508FVriQyNtiVoE2k6Q5CCNECar0B0lTV6N89EzKHw9DJ4K5Wrke0BtQLniHm5CeBznWOJ7h7xLy6IZBj16uaPnjQmUimCq0aNpc4OaZ7Yht3VAghjjztEsgpLi5mwIABAFitVhwOBwBnnHEG9913X3t06bDg9Qf5Zk0Rd3/akMB3R0Uds9YVM/2K4ZzQK7nJCJiW8PGygqhtNW4/t3+wslEQB6DGE2DKG0v54pZjSIty81pQVUeNO3LFmVnrirnu2G48dGIy1nffAo8D8hc3XXHdp5D8fwe+M0K0ssJqN5dOW9Qo+PHgV3/w/tI83rhqZNTjoSOqdPlYtK3xVNgBGTGU1njrgzgAY3skMvuPkn1fXm/Ggu2cMTCN5L2e4uq1aqaM6UYgFOathTvxBkJc0NuE8fNPIm8kHIJtPyuBHKNd+SeEEK0sEAzxx65qBjpXKoGPnb8p//aiUanRjL0LbJ2sQt4+I3IqPSHshgjr6cxoVGHSzWG2lEnlKiGEaAvtMgQhMzOToiIliWVubi4//PADAEuXLsVgiPQXQhyI0lovD365rsnycBju+mQNJTURkoP+SaFQmPzKuqjtGXFmVhc4IraVOr2U7hPg2Vutp3EQR69Rc2r/VK4e05XTB6RRUuPB5fUrZcSjcZUpT+aD+ylBLEQbCIXCfLumqMkIFoCNxbXM36dCU0fn8QebLDtzUDof7RPcNeo01Hj8UbdT4/YTjJAgM8lm4JYTujPjyhG8OGko3RKMEIy+Heoi5FdzlUPFFqjc1nnL/wohOqwSp5dF2yoJRzr/7BEOEW7u3NVRBXdfo+01Isemj/BAUKdMGks3+aUEuRBCtJF2CeSce+65zJkzB4C//OUv3HffffTo0YMrrriCKVOmtEeXDgvF1Z4m05D2KKv11ufjaElqtYoTe0euNLWnvTm13ugBlm6JFvaM3h3XJ5lXLh+G1aBlRV41Rp2ajDgTWqONcM6x0d8gYxi8fir89j+oLY2+nhBtoKrOxyfLo49g+2BpPjXuznOxbzfpMOk0jZZZDVoqXY3PNesKHRzVLYFoju2ZhN2o3Cg43D42Fjv535zN/HvWBjaX1gJh1GqoCRmiV6kDXJnHUOxQEqUT8MGuZfD2ufC/YfDcEJg5CUo3KNFtIYRoAZW1XhZvr0CVNSL6SnFdCeisbdeplhLYfS7X7CeQo1YrCY/1HhmRI4QQbaRNAznPP/881dXVPPHEE/zf/ynTXS6++GJ++eUXbrzxRj7++GOeeOKJtuzSYWV/KV5aK5nq0bkJJNmajqRSqyArztTkRq+hP5AeG30aSaxZx8QR2QzvEseUY3KYt6mUUqeXlflVfLpiF4XVHh78MZ/iUfdEzn+x54avfBPMeRAWvQg+96HsohAtQq1SoW7mONSoVXSmnMfJNgM3HZ/baNn6ohqG58Q1Wra1zEWyzUiXhKapPg1aNbeP64HFoKW6zsf0X7cz4dlfePrHTbwwbysXvLyQl+dtw+UNctn7W6k+/jEifUiB7GPY6k/g5XlbKap2KyNwZpwCxasbVtq5AGacDNU7W+YDEEIc8VQqFSvzHQQtKYR7Toi4TvjEe0DXCRMAB/dJduyJEsgB0JnJ0NdQUeujytXyDw6FEEI01qaBnHvuuYf09HQmTZrE3Llz65ePHj2aqVOncuaZZx7Sdl944QVycnIwGo2MGjWKJUuWRF132rRpjB07lri4OOLi4hg3blyz63cmqXYjFn3koEl6jJF4c+uU2M2IM/Ph9aM5oVdS/f1V92Qr7117FGkxRm49qXvE1104LItEa+Q+Vdf5+GxlIaf0T+H+M/vyyfJdbCqupVuShdevHMGVR+ewbGcVP20s594FAWovm0W467HKDZ7BBkfdpFSt+n6v/DgLXwBX9DwdQrS2OIueS0dFL21/+egu2IydJymvXqvh0lHZ3HdGX2LNSr9nrS3mhuNy0WkaX+w/+OU6Hjt3AJNGZmPQqlGpUCpZ3XwMXeItAOyocPG/uVuavM+vW8opcXiwGrU8+Lueiou/Jpw5Umk0xREecwec8V+CwSAqtQqvx0V4/n8aEnXuzeOANZ8oVeqEEOJPSrDosei1qAIeVCNvgBPuBcvuZL9pg2HiTFRbZmOs64TXH3tG5KiVlJoV7nDT0uN76MxkqpTpZcpISiGEEK2pTZMdFxcX89FHH/H6668zfvx4srOzmTJlCldeeSVZWVmHtM0PPviAqVOn8vLLLzNq1CieffZZJkyYwMaNG0lObjrlZ968eUycOJGjjz4ao9HIk08+ycknn8y6devIyMj4s7vYrpLsBp66cBA3vbe80cwBnUbFMxcPJiWm9Z4GdU208NzEIVS6fARDYWxGXf0onYtHZBFn0fPMD5sodXqJMem4dmxXJo7MjnrTuiKvmp83lqFRqXjwq6X1+7NwWwXvL8nj4xuOZvIMJQA3aVAMmlWvocoYBsOvhoAHNn4HjgLIHAFbZisvDvqUHBlxOa32OQixPyf1TmZmhp01uxrndhqRE8eInE6WCBNIsBq4cnQXTu2fitsfxKBVE2/W8cmNR/PAl+tYkVeNRq1iRNc4suJM3H9WX24b3wMAo1aD3aScAwKhEG8vzIv6Pp8s38UFwzJ4ctZGMpNzuemMlzA7NoO7EtWaT9AueIYh8d2IPeUt1m3fRU7eb1G3xba5MOp6MHTCqQ5CiA4lxW5kxpUjCPkKwZGvJDo+6UHloVLFZtj2E2SNIlyxBdL6tXd3D85eI3JC4TAObxhbtGeCehNplKFRq9hU4mRk187390wIITqTNg3kmEwmrrjiCq644gq2bdvGG2+8wWuvvcZDDz3EuHHjuPrqqznnnHPQ6Q78ifQzzzzDtddey1VXXQXAyy+/zDfffMOMGTO46667mqz/7rvvNvp5+vTpfPLJJ8yZM4crrrjiz+1gO9NrNBzXK4nvbh3L6wu2s7nUxaDMGC47qgtZ8a1fCcdm1EUMzMRbDFw8PIvjeybjDSg3er5gmN+2VlDq9DI0O5asODOJuwM/lS4vT/2wkdtO6sGtM1c0SWfh8YcorHbjcPsZ3iWWoc65mFbOaNqhDV/DxPcbAjlQn5BPiPaSGmNi2hXDWby9kveX5KFSqbjiqC4M7RJHir0TDr0HNBp1k2mSAzNjmTF5BDUePxqVijiLHp1GRXGNl5V5VZTX+hjWJY6MOBOJVgPBYJgKV/Tk5zVuP2a98ifrhEw15s+vhOI1jVeq2ErXuTfjPeEVsCZDdZTAkD0DNK0zQlEIcWRRq1X0TrNCWRi+vn13Bb25jVca9xCkDW2X/v0pgYZkxw5vmGAY7M1MrdL6HKTFGNlc4my7PgohxBGqXcqPA3Tr1o2HH36Yhx56iNmzZ/PGG29w5ZVXYrFYKC09sKS0Pp+PZcuWcffdd9cvU6vVjBs3joULFx7QNurq6vD7/cTHR35y4PV68Xobbi5qapqpkNSKXN4AFS4fvkAQi0FLis
0YMZGwWa+ld5qdR84ZgMcfxKTXoNM0nkHnDwaprvOjVqmIt+gpqfHiDQTRadQk2wxoNS0/406lUpEaY8QXCLJ4eyXXvPl7o8TMQ7NjeXHSMFJjjHgDIUpqPDg9ATz+yNMflu6oYnS3eCYPMBE7/4XIbxoKKCXJM4YpSU8zhoI5scX3TbSNjnIsHqxQKEyJ04PLG0Cv1ZBo1ZMaY+LswRmc1CcZFWAxNB+89gVCVNf50GpUxFuUgKfLG6DK5SMYDmMzauuXdyRxFj1xFiVg4vUHmb+lghveXoYv2HBcj8yJ53+XDiHFbuS0/mnM21gWcVtHdYtn7S6lAl6arrZpEGePopXYcFM66GaSC66KvM6oG0ArgZxD1VmPRSFai1ajIfjHF0oQJ5Jlb6Dqc3aLv2+rH4vBhmTHFW7lqZq9malVuPLJiDWxUSpXCSFEq2u3QM4eKpUKrVaLSqUiHA7j9x94xZby8nKCwSApKSmNlqekpLBhw4YD2sY//vEP0tPTGTduXMT2xx9/nIceeuiA+9QaCqvdPPHdBr5ZU0QwFCbRqudvE3pxSr9UYqPkvdFr1ei1TQMyeZV1vLNoJ9+tLeK6sd1QoeJ/P22mpMaL3ahlypiuTBqVTZKtdUYGFNd4ufqN3xvdyAEsz6vmpXlbuOf0vhi0GrolWgiEoleW+XhZPq9NHoHKuQucxdHfsKYIzAkQ2wXOnwGW6JVzRMfWEY7Fg1Vd52PWumL+PWsjFS4fWrWK0wakcdepvUmPNWHdTwAnFAqTV1nHjAXbmbuhFJtRyy3Hd6dvhp2nvt/ErHXFBENh+mfYefis/vRNt2OMkly8vZXUeLjurd+bHNdLdlTy2vzt/H1CT8b0SCQ9xkihw9NoHaNOzYXDs7ju7d+VBX5Xs++VrPPgSutPaMT1qJe+0tCgUsHJj0F8txbZpyNVZzwWhWht6sqm+b3qOQsJomnxi+5WPxb3GpFT5VHO3c0lO8ZbQ0a2KWpAXgghRMtpl/LjAPn5+Tz88MN069aN8ePHU1hYyLRp0ygqKmqzPjzxxBPMnDmTzz77DKMxcuDi7rvvxuFw1P/Lz89vs/4BlDk9XPvW73y5qpDMOBMDM2PwB8Pc9ckafvyjhPBBlNHNq3BxzgsLePWXbfRItlFW6+XeL9ZSUqP8oa7xBHh29mYe+2Y9jlYqgbxsZ2WTIM4eH/yeT5nTQ7xFz40ndCfRqkcbpXy5Uach1qwlPTEWUgdGf8Mux8Bx/4Ap30N81xbYA9Fe2vtYPFihUJgf/yjhrk/WULG7gkcgFObLVYVc99bvlDmjTyPaY3uFizP/N5+3Fu6koMrN+iInao2KSdMW1wd2AdbuquHCVxayrQOXfV2wtSJqcPbdRTvJq3CTZDUw8/rRnDskHZ1GqeA1pnsCL00axrOzN9WP0DPZkyJWrgJApUbndxA7fQTq2Ey4YQGc+Ryc8xL8ZTkMvQJMsa20l0eGznYsCtHaQqEw/pzjo7aH0wbjV7f8A7JWPxaDu68FNVqqvcr52xotkKO3gtdJdpyRSpeP8tr9/40TQghx6Np0RI7P5+PTTz9lxowZzJ07l7S0NCZPnsyUKVPo1u3gn5AmJiai0WgoKWlcCaCkpITU1NRmX/vUU0/xxBNPMHv2bAYOjB4IMBgMGAztN2Uhv8qN1aBl+uTh7KxwUeb0cvWYrrj9QV76eStjeiSSFrP//DfeQJBXft5G5e4byvOHZnLnx6sirvv5ykL+clIPYkwtXz2nsNoTtc3jD6FRA85ixiQHKPPpuePknjw5a2Oj9dQqePjsfryxYAejcxM55aT7Ub17QdMN2lKhy9ESwDlMtPexuD/hcJjCag/rCh1sLauld6oNi0EbcYTJ2sIaCqrq6hOCR+LyBnjq+404vYH6ZblJVgqr3U22BxAMhfn39xt5buKQjlP5KhgAVxkQJsUYxKTT4PYHm6zm8gXZXOrEFwzRJ83OPaf1ZeLIbCpqfRh0aiDM6G4JnDkone7JVhaVVnFinwsx/PFh0/cccAFs/lH5/sf7YN4TcNNCiOvSqrt6JOnox6IQbc0XCEL2MUq1Kld50xXG/o2AquWnc7b6sRhsGJFT7VXO3dZof14MNgiHyLYoAfeNxU4Su8t5QgghWkubBnJSU1Opq6vjjDPO4KuvvmLChAmo1Yc+KEiv1zNs2DDmzJnDOeecA0AoFGLOnDnccsstUV/3r3/9i8cee4zvv/+e4cOHH/L7t4Vih5tJo7K5+d3ljXLK9Eqxcd/pfXH7mt4URVLl8vPt2obRTiqVcvMUTX5lHblJLV/RZUROXMTlRp2ajy7tSvLqV2HJy+g8DtK6ncCko++i35VDeP7nneyqctM7zcbEkdn8sK6YUd0SKC0tQqVdAWc8Cz8/Cc7d+9jlGBh7B1RulUCOaHXhcJg/imqYOG0RNe6GwEtajJEnzhvA1A9X1Y/K2WPNLgdDsiMfD6Ak9/3xj8ZB6j5pNn7fURX1NYu3V1JZ6+sYgRzHLvh9Bix/A/wejut5Kj9MvpUbv61ibWHjkUPdk60UVLl5d3EeL1w6lESbAbNeQ1mtl6o6P2admp6jbPzjkzU8/PUfaFQq3rr4doaY4jCtelOpVKc1wuBJkD0KPr+xYeN+Fyx4Dk75J2jlpkII0fL8wRCEwXjuK/Dzv5T8fAAxmXDcXaj++BxLXC4Q257dPHgBL6g0oNbg8AYwaog6UnpPFcAUrQuDVs36ohqO6S55CYUQorW0aSDn3nvv5fLLLycpKal+2fvvv89ZZ52FxWI5pG1OnTqVyZMnM3z4cEaOHMmzzz6Ly+Wqr2J1xRVXkJGRweOPPw7Ak08+yf333897771HTk4OxcVKfhWr1YrV2vFK0XZNtHLuiwsaBXEANpY4+XhZAfee3vuAtqNS0SiJsd3Y/H+91dDyvxoOt48+MQG+ntyVbZVeXllWy7pCJTHff05Pp99vt6POb0hSrdr4DfYtP9Lrku84OjcBi0HLznIXf525Eqc3wPG9kumarIdP/wnpQ+DEe8BgB7UGdi2HT6+BPudA98j5j4RoKSW7cz/tHcQBKHJ4eObHTVx5TA5P/7CpUdu+VZ4i0ahVjaYj1XoCZMZFf128Rc/vOysx6TUkt0YFrNoSZZSN1tgk31QoFKa81ksoDEnhCjTvng9l6xv2Ze1HZG2exUsXfse4N+oandNuPD6XF+ZuYVu5C6c3gFmvjNyJMenokqD8bfjfnM3M36I86Q6Ew1z+wU7OG3ABl59zGdmWALGBClj1Pnx2Q9OEozt/BW8NaJMQQoiW5g2EiHGXwGfXw7Cr4JjbIBwGdxUsnQZFq1APm9Le3Tx4QR9olAcDDm84+rQqUEbkAGpPFVlxJjYUS+UqIYRoTW0ayJk6dWqTZddffz2jRo06pKlVABdffDFlZWXcf//9FBcXM3jwYGbNmlWfADkvL6/RqJ+XXnoJn8/HBRc0norzwAMP8OCDDx5SH1rTtrLaqJWbvl9XzJ2n9Dqg7cRb9Fw0LIsX5inJ+CwGLcO6xLFsZ9On+4lWPfGWQx8CHA6HUe2Vv8Lp8
VNVVYGtYg0xvz1G/4ot9O96HMceezG/1OXw8tJqjk9wNAri1Av6SFzwMPqUB3hsduOkxlq1iuq6kJLMeNdy5d++4rL33+GaQnAUKKN54rqCLQ2scsMnDlyp00NxTeRpg6sKHNxyYo9Gy6wGLX1Sbc1uM86i57whGby/tCHnwW9bK3jpsqG8szhyWe0Lh2fx7uI8MuPMLRvIcZXB5tnwy79AZ4LUQTBkEqQOAGMMJTUePl+xi7cW7iTeouPdY0qx7xXEqed1krrqBS4efDVv/V5KlwQzN5/QnSXbK9lW7qJHshV/IMRzczbz7dpiLHoNU47pyujcBDyBIGoVHNczmRN7J6NRw6JtlVw0cxeXDEniAf/rqDZ8E7n/lhTQ7Gc0Tk3R7vNAIcTlgC1dzgNCiAOiUoFKZwS9TRkFrNaCv06Z4j3gQqjYirozjggMeEGjXA9We8LRp1WBkiMHwOMgK74LfxRKNTshhGhN7V616mCS9UZzyy23RJ1KNW/evEY/79ix40+/X1vadzrG3gKhcH2y0/3RadRMOiqbb9YUsqOijvJaL7ed1IO7P13Drmp3/Xo2g5bHzxtImdNDt4OYWlVV5yO/so4Pf8+n1hPgrEHp9MuIIdFqYGVeJf2qfiLu+5uVxMMp/aHgd2Kr13Fmt2SOye0GS5+Pum3Njp8ZPaxxYKlPmo3t5S78AQNjR92KbtsPysVG0Uqoq1RWUqmh95nNd7xsI7xzPjj2ShCYMRwuelMZEi3EAXB6As22+/dK8G0zaHnr6pGk7ie3lVGn4eYTujNvUxlFu3Pi+IIhfvijhIfO6seDX61j79PncT2TyI43szyvmt+2ljOqWwtVaPM4Cf/6H1QVm+CUJ6B0PbgrobaEcIWZansfbnxnOcvzqgEYlRODfdOnUTen2zKLmy/7B6N6ZlLh8vLar9vZWOJEr1HzrwsGcsHLv1Fe60OnUTG+bwo7KlzUeP1cMDSTYV3i+XVzGV+vLuTy0V24dkxXrj+uG4FAiFDoFjTRAjlj/gpGe/R9LN8E71wA1TsblqUPhYvfgpisQ/jQhBBHkkAojNoQA+e9Ch4HeGqUaZ0qtVIl79IPCestNDOepWMKeOtH5FR7w1h0zeyBRqeM1nRXkR3fl/lbyvEHQ+g07VZXRQghDmvtHsgRzRuUGRu1LT3GiPkgpkClx5p4/9qjWLClHKNOw98/Ws3dp/XGFwixubSWjFgTqTFGnp29iUfO7n/A262q8/HiT1uY9uv2+mWfryykf7qdFy8bhtFdSvwv98JpT8PWOfDTP+vXU817grgT78eXMwZ+fynyG+hM7D0oyW7U8vcJvdlRXkufOA3BuOPQustQ+ZwwaCJ4nfDjA3DOC2BPj97xmiJ490IliBPbBUZdp3wNh6F4LRhj6+d8C9Gc9FgTKhVEikubdBp6JFv59wUDSbYZ6J5sJTXGhCZanoG9ZMab+fiGo/lpYynfrCki1qTjvCEZpMcY6ZIwgtUFDty+IIOzY9le5qpPYJ5ka7nROGFXKaqKzdDnLJh5KYQaglaq9CHYzn+D1QWO+mWeQBi/yU7UB7d6C1V1AW5+r/EIustGZ/Ph0nzKa30Mzopl6viefL26iK9WF5FkNZBqVyqhrClw8PQZWehrd2Jf/RWEwzi6nk5lXDoJJ9yL+qdHG7/fMbcpQZmaQmWag1oL5niw7B5t4yyG9y5uHMQBKFwOX0+F86aDKeYQPz0hxJHAqNOgCqghGFTOLQa7EsgxxkAoqJx31J3wkjvgqR+R49hfIAeU6VXuKrpmWfAHw2wuqaVvejNBdCGEEIesTf+qhEIh/v3vf/Pll1/i8/k46aST+Pbbb8nIyGjLbnQqGXEmRuTEsTRCgtN7Tu9D6kFOn0iLNXHB8CxKazykxRq5beZKkqwGMuJMlNd6Kahyk2DRkxpz4NvNr6hrFMTZY21hDZ8uK+DiLl7lwsZfB+u/arKeeu7D6Kf8qAzL9TUtnxwaNInSoI3bTurOMd0TsRt1VLv9HJetJ2n9mxg//VfDysveIJw9mvAN8ykNx2IN67CGw8q0KVe5chNqSQKdGbwOOOFuKFoNXcfC7IegbIOyHXsGnP6MshyUp2tavTKNS4h9JFj0XDA0k4+WFTRpu/mEXLokmOmR0vxUqmgy4kxcdlQXzhuSgUajwqDV4AuEmLFgO79trcSgU/PGbzvqq0Fp1CrG9mi5BJPB8i1oR1zTJIgDQOEKNItf5PR+5/PFGiV/za+bKyi98FIy1rwXcXuO/pNZUqbl1cuHMWdDKTUePxcNSWVUcogtJQ4GJmaSlZ7OtW8tw6BTc9HwLIZmxxEMKUHcaRd0wTjvYUzrZtZv0/L7i7h7nYvzxIew9DkHdcEi1OEQdBkNpjjIXwhf364EbQBS+sG5r0BCD+Xp+UkPKLm16iph4f+gfLOy3pYfoa5cCejuqURjTgBNJ7whE0K0Gr1apQRszLHww/2w9Uclsq+3wFE3Q//zUe17/uwMgj5QK2H5Km+YRNP+AjlWcFfRJcGMCli7yyGBHCGEaCVtejX62GOP8eCDDzJu3DhMJhP//e9/KS0tZezYsW3ZjU4l0WrgfxOH8PLPW5m5NB+PP0RWvIl7T+/L6D8xdSLZbuSZiwYx8dXFFNd4KKtVSkxa9Bpeu3LEQQWIZi6NnK8D4L0leVzVOxX6nw8r342+kWWv47via/SOHeB3wx+fKyWE47tSPOAGehrj2VZVwnVvL6O6zk+q3cj3l8RgX/ivJptS5S3Eu+YLXqo6gW7xJi7LLkfz6RTliTwoT4yO/z+o2g6rP4RJH8PbZ4PP1bCRml0w8xK4eg4sexO2/KAEgMbcDjljwZp8wJ+POPzZTTruPKU3WfFmps/fRo07QJLVwO3jenBq/zT0Ws2ffo+9R9/ptWquPy6X33dWNRoNo1GreP7SIS2bHye+G2yb2zSIs5tqxdtcdsZlfLFG+blnqg2fPZPAiOvRLn2l0brB9KFsTz+T+9/+A6NOzQm9k/nH0TEkbZyO+cvXGOhxMCBzJFWpD3LF8GSO7p3JK79s49VftgEwvk8yJ42qRL9XEGcP08bPqOtxNp+Eh7GheAjXH5urTF8rWArvX9J45ZJ18PppMPlreOPUhmM/NhtOeRzm/wcKfleCQOEwzH8WVrytJFEecCEMuxJiZcqVEELhqPNjVKngk2ugZG1Dg8/VkFtswAXRN9BRBTwNU6s8YXLs+5kmZbCDSxn1nR5rYs0uBxeNkHOlEEK0hjYN5Lz11lu8+OKLXH/99QDMnj2b008/nenTp/+pMuSHu9QYE/93Wl+uPTYXfzCEWdcyFWm6Jlr57OajWV9Uw6r8anKTrAzpEkd6jAn1AUz7AKVSTU0z+UHcviDV2ImJyYS6iqjrqWtL0O74GeY8CAY74cGXEbjuAVaVw9erfGjUBbw2v2HUT/8MO8Y170TdnnH5q5w1/mQS9G4075ytzPPew+uE7++G81+D2lJY/0XjIM4e4TD8/LiSANlZpPz7eAoMuRzGP6xMzxBitySbgZuOz+WCYZn4AiEMOjUpNuMBH0sH
KzXGxGtXjmBHuYvftlSQbDdwTPdEku1GTLo/HzjC74HSP9CsfK/+Qj7yenUYNcqcslP6p3Ji72TOfX09fzn6Ik658GySNn+INugi0O8iFrpSuflDJfDr8Yc4o5uWzDm3oC3Yq1pdwRLiZ57B1Mu/5uwv17OhuGGUXvc4FdrFL0btSsLqVxhy0nT+8clOftlUwRfX9Mc65+HIK3trYMNXyuic/CXKsuo8+ORaJUfWuxfCGf9VArp7RugA/PoUrJ4JV82SYI4QAgCtTg0VJY2DOHtb8F9lempns1eOnBrfAUytMsbWVyvMSbSwZpej+fWFEEIcsjYN5OTl5XHaaafV/zxu3DhUKhWFhYVkZkpi2ebotWoyDqBc8cFKizGRFmPixN4pBIIhims8zFlfQqnTw6iuCdS4/Xy5qpBwGM4ekkFOgpkEa0PlBbVaxdmDM/h6dVHE7Z/YJ5mQKZ7qxGHEZh2ljLSJJHsU6vVfKMETjwPVohdQ5y0ifvx0LhyewNnPL2i0ulmnxuCJHhjCU4PNbCRh08eNgzh7WzoNRt0Iy96Ivp3itZB7UuNlK96G0TdLIEc0odWoD6iseEtJthlJthkZ2bVhdJ7D7Wd7uYsvVuzC4fZz+sA0eqXYGgV/nR4/5bU+lu2oJBiG4TlxJFkN2E17BWxK1sGM8aiMMUqS42iSeoPOgkGrTIO65s2lhMLwxUYPtthsug1+gFAoTIJVzyPfLKPWqwR+7UYtQ6zVjYI49cIhDD/ezeTB/+XuWQ2BHLsO1N5mKqF4a/B6leN9a1ktPrcTitdEX794jTLiaE8gB5QpoAVLYeAlSvB27yDOHo4CWP2BkkRZ3QJBMyFEp2bDC6V/RF/BU62cWzqb3YGcUDhMjZfmy4+Dkk+srhLCIbolWvjw93xJeCyEEK2kTQM5gUAAo7HxSBKdToff72/LbogI/IEQy/OquOqNpdT5gjx0Vj+e+mEj368rqV/nrUU7OX1AGg+e1Y8kW0MwZ0BGDH3SbKwvcjbaplmv4dYTe5Adb6G0JhPb2L+j2fRd08CKJQlSBzZKggygKVyG3bWTrW4zgVCYUV1juXmYlXhDGHdYS5X2SuLWfxlxf4LdTmRrhY+eFauj73T5JmX+ekwzOZpiMhpyY+xt+y+Q3Cf664RoB9V1Pl5fsIP/zmkIPsxcms/grFhevmwYqTFGqut8vLc4j3//sLFRcuYbj+vGdcfmEmfRKxfis/6h5Hyoq1SO2ZT+EZ82h095guy0bL6+NZmX520jFIZbTuxOvFnPf2ZvpsjhwWbQcumobF65fBjnvfQbDneA7sk2rLvmKnV7u4+D/hfsrnhSCcvfhsLl9D+m8cX//AIfk3ImYC9c3qQfAL7upzBzTUOgp7AmRHxMppLkOJKYTGWK5b7KN8Oom2DOA9E/7DUfwtDJUqJcCIEqWIfKmhJ9BY0eNJ2w/HjQC2odTh+Eofny4wCGWGUarkcZ5e0NhNhY7KR/hiSMF0KIltamgZxwOMyVV16JwdDwx8zj8XDDDTdgsVjql336afTStaJ1lNR4uPL1pbj9QTLjTOg06kZBnD2+WVPE2YPTOblfav2y1Bgjr185gncX5/H+kjzqfEFO6p3M7eN7kpNgQa2CYBg2+FPoceX36L7/B6r8xaBSE+p5KuoRV8M3d0Tsl3HnHOL6DeK5s7I41vcrsT/9B1xlYEkkcPRf4ZyX4PMblWDQ0MmQOgBCAWoTB/PZj5WMTeqHlR8i73R8LuxaCn3OVPL3RCo5NOxKmPek8r3ODEMug27HgykeqvPBltr8tBMh2lBBlbtREGePlfnVfLwsnxuP786W0lr+9f3GJuu89PM2js5NZGzPJCXpeMHShsYf7oXzpsG6T2HdZ0pgJ7EnwQlPsEnbk7dmbWDyUTlsK3NxTPcETDoND3/d8HTa6Q3wyi/b2Frm4p2rR/HV6iLmby4nYEqEc1+FopXw3Z1K4uGYTBh9C/Q8mUC4cSBnwdYqSo47E/uqacqN0dArILmvkldr8w/U9pnI2s+LefvCLLKMHpKsfhj7N/hoctMPS6WGnhPg/YlN29IGQ2J3OO4fSp+Cflj1HmyZ07COxqBsQwhxxAuHwkqOLUuSco2yr/7ngb7tRmu2mN0jcqo9yvWRZX8jcoy7Aza1ZXRN7IFGrWJFfrUEcoQQohW0aSBn8uSmF9OXXXZZW3ZBRFDm9FDj8XPfGX35ZHkBgzJj+GpVYdT1p8/fzujcBGxGJYARDIWpcPkoc3qYOr4neq2aP4pqWFfoINlqoNDh4aJXFjIwM4ZjeySi7fIEI0ZpiDEbSIwxY3r9JCVXTQQBQxy6kJdxjk8wL362ocFVjvbHewgddTNM/Ag1Ifjtv0r+Cr0Fw8DL+euoK6nSXIj19+eVG7F9jbga5jwCFVvh1H8rN6sBj9Km1sDoW5UEyY58JYHf+dOVaVgfXKYkPTXYYMxU5WbS0nJVgsSRp7TGUz/8/M/kv/ro9/yobW8t3Ml5QzOY/uu2qOu8+PNWBmfHYlOpleScfrfS4HEov/d9z4ZzXyFsS8dnz+aaT/L4dfNKAM4dkkm3JAvH9kzi/i/WRdz+7PUlTByZhUGn5ukLB2LTZ8Ds+2DD1w0rOQpg1l2Exz/CivKm05au+aKU7yf/iMGxFdW8x2HeE2CwExo+BU9IxWtja0mcfU1DharTn4FjboffnlOOW1CCsme/oIz82TeJs9YIvU5TAsSbv1cCvMYYOOomyD4a5j6irDfiGrBIFTshjnQFVXXYtHHoNbWoLv1AqfC35/wDkHsCjL6FEGo63UTMgDIix+HbHcjZX44cU6zyta4cfVIvuiSYWZFXxeVHdWndfgohxBGoTQM5r7/+elu+ndiP6jofS7ZX8uSsDWwtc5FsM3DZUV12lzuvjPq6Wk+AQLBh9MquajcXv7KIWm+AmUsbl19+95pRvPjTFhxuP2qViow4Mze9q5T4Hphh555TchnQ40zMK16L+F6OrHFYAxWYf38hYrt67SeE+58PM05uuCHzuTD+/jK9dv7KpvEzCEz8BO1nUxqmSOlMMPYOJQdIzS5Y8zH0Op3wDQtQOfKVocSJvZQbuNd258c58R5l6lfRyoY39zphzkOg1io3eVKSWBykSpePXzeX8fQPm8irrCMj1sTt43pwUp9k4i0HNww/HA5T6fJFba/1BgiGoLgmSs4ooMzpxRsIYbMkER58Gaql0xoagz5Y8xGs+Yiiy+ezvkjNr5sbph1uKKrhnCEZ1HoCONzRp8sWVLmZuTifYdlxpBpqids7iLMX1a9Pc+qlp7JvquJT+qWgqi1C9d5FDYEZbw3qBc+StnMhqkEXNb6J+mYqDJ8CNy2Cym2gNUFCN9BZlKDR5u8bpntaU+DCN+CXp2DTrIZteBww73E46X7ocgwEfNBjfNR9FEIcGQoq69hQ4mRsih9V2A+/PA0nPdAwTdSeASXrCAc81KpsdLpxKQEPaI04vMo1336nVulMyr7vPgfnJllZkVfdun0UQogjlNx5HuY8/iClTi9rCqpxeYMMyool2WbAZtTy3dpi7v6
0IRFoqdPLMz9u4qLhmVx/bC63vL8i4jZP6Z+K3djwq/PrpjLc/mDEdZ/6fiNjeyayLK+KY7onMDY1yMIbctlR7SfPZ+Pfs3fw6InX0rNgIeqyxokCneOf4rMtQS7o4ow8okZnhsETUc1+IGJpZHXZOrpTgLbbiXD9L8qFhdcF8V2BEGyZC2PugC5HweoPqQ1q8XQ9maS9R0Rc+zNs+g7ichoHcfb261PQ71ypYCMOitcf5MPf83niuw31y3ZVu/n7x6u55YRcbjqhO2b9gZ+iVSoVZw1K56soiceP75VErEnHMbkJrMyvjrjOsd0TMWrVVHhC2EffhmbHfNS7K5DsUT3uKV5f7WFYjxB90mxcN8xOboyKsNqFSm8hxtj89AG7SUedL8COchfHxkbIT7OHp5oErYfPbzqaFfnVxJp0dE+2kq6vw/DZDQ1BnL0/g4LFcNT1SuUUT7Uy/YowbJsH6UNhwMXgroBwUBl1N/JaZZRRbakyRdKaDLVlsOaDyH1a9CL+iR8TMCVhsqc3u59CiMObNxBkR4WL699exuY7ekPVDtj4DWz5QRm9Z7TDzgVQV4lqy2zM50yHzhbKCXjAYKNmdyDHvL8ROSqVMlLRqYzq7pli48c/Sqh0+Yi36Fu7t0IIcUSRQM5hrM4X4KcNpdz+wUr8u0fQ9E61ceeEXmQnmHn8u/VNXpNsM2DWa+mdauOCoRl8vrKQQKhh9E2iVc95QzPQaNSUOj0UOzyY9BpevmwY28pq+d/cLfVVaQA2lji5bVwP5t40CHvhfKzvPYatagdp5niGj/oLuceexqUf5PPfM16lRziPuILZqG0p+HufxcJiPZaQmoSYvX5N9VYYO1UpGeyugpQBsODZ6B/Chm9wZB5LjCUFfHXgLFHW1xqg56mQORzevQichahMGbyU142bT+jRUJkrLhtGXqeUG47G41ByighxEEqdXp6dvSli2yu/bOPC4VlYDUGq6/yEgVizjkRr86N0BmTG0DPFyqaSht9HnUbFaf1TuW1cT9RqFZeMzOKN33bg8jUOvp47OJ1zhmZwz2dr2VjiZGBmDP84bybasvUYtv+A35QEfc6mKBhLV12AvvHw6Xg3prl3KInDDTaCw6/FPXgKx/dMZN6mpknCY0w6tGoVLl+QkUlB1P6mwZi9FdQE2VDjJNlmZF2hgxpPgB7dQ1C0qpkX/a5Mm8wcDqiUnDvFa5VArM+lVKn65UnlqfHJjylVt+L2Gva/4Zvo23aVk+eES9/awKc3JpAR1wlzXgghWkS508t/Zm8mGAqDwark0Bp5HXQ/CXbMV6amnvaUkjPnx/vRhj3t3eWDF/CCRo/DG0YFmA7krsEUDw7lgUKvFBsAy3ZWMb5vM8mghRBCHDQJ5BzGiqo93PL+ivocvmcMTOOE3snc/+U6/u+0PtS4G49iuePknqTHmPh0RQG3vL+C0bkJfHD9UTz+7XrW7KrhrEHp3HJidzLjzGwvdzHljaVsL3fVv35odhzPTRzCLe8tp273TeJxPZKIM6oxb/kK65w7G96srhLdTw8xeMAGrh5+HZd/mEdWnInnL32Adxbt5NPndygXR0Dm2ZmcmthDyVdzwQyY/x+Ys3vCxfmvKflrPI6In4FbH8/7S/K4foAG1bd/h20/NTQufEEpP37RW/DaONwxucxbWM55Q7MalVjHVQHW1KYb30OtVYYTC3EQKl0+PFECGf5gmCKHh0e+Xse6QqUaXI9kK0+eP5D+mXb0msiZFlJjTLxx1Uje/G0H7y/Jo3eqjb9N6M1Xqwq56d3lpNgN3Hhcd778yxju/Hg1y3Yq1ZwuHJbJib2TOfuFBfXniw3FTj5aVsDTFw5ip7kbJ/RO4e5PVrO+eAf90u1M0JZj+urKhjf3OtEseAbTruX896yXOWOGi/xKd32zWa/hXxcM5Lk5m9GqVeQYasAXAlNcxKpS4ZwxaGzJvPn9lkYV8QZOymaI1tC0+t0ethQwxMB3/4Az/gNvn9O48pzODOe9quTWmX4SXDsX0gY1tMdmR94ugNZIXUjLpEFxmOp2gcqg9N9gjf4aIcRhyR8M159Dw2oN5J4E+YvgvYsbVlo6HXLGwLmvEkbFfsazdDy7c+TU+MKYdaBWHcAemBPqKxwmWvUkWPX8vqNSAjlCCNHCJJBzGPIFQjjcPj5fuav+pizBoufMQenc8M4ywmHQqhv/Mb7qmBxKarw8/UPDCIENxU4+WJrPR9ePJtGmx27UYdJrKa3xcM2bjYM4AMvzqnh74U4uHZXN9F+VKRM3n5BL0FFA7ILGpcX30K75gLMn3cJTv8JRuQlM/3Vbk6khj8wrZ9AF00jf9T0seQXyFkK3E5RpEWqtkmz4t/9F3L4z9wzCeV7YOK9xEGePxS9Bz1Mg51jK44ewrTyPNQUOpcKC16k83f/+biWnTrRqFAMuBEtyxPcXIhqNuvkLYqfHXx/EAdhcWsslry7i29vG0j05euAgPdbE3yb05OoxXSmsdnPxq4vwBpSA0YZiJz9vKmfq+J68fNlQar1KwFWjgrP2CuLsEQ7Dg1+u440pI7nv87WsL1b6c/soGwm/3hR5v3bMw1BXxP8mDqHK5WfhtgrSYoxkxpl4fu4W1hXWMK5vMmFnsRKUPet/8Nn1ymiZPWKyCJ36FC/NL28UxAH4YUeQgQMuQbPizaZvrlJBtxPh99dhwhMw+8HGQRwAfx18fbsyGuez6+GH++CiN5WADChVsPZMzdqHd8ClZMYYuKn8AbTT5ypVq3qdAeMfVEb6CCGOGHqtGrNeQ50viCdsxGxNRvXbc01X3DEfck8i3PW4tu/knxVsGJFj3d+0qj3MCcoDsKAPlUZPzxQbi7dHz7sohBDi0Ejt1MNMaY2Hf32/gUe+Xs/Oirr65ecPy+SNBTvqb9S2lNbSP8MOKDeUY3sk8c6inU22V+cL8uBX6zBoNZh25+sodXrZWuZqsi7AvE2ljOqagFat4vaTehAGbGFXxJuiPZJ8+Vw7pivj+6Ywa11xk/ZCh4cbvncR7HO2MnS59+nQ7xz4+Cp4/xLIHq2UC95HeMITOPXJXDPIjGp5M4m2V75D3enPc/ccpY8J1t3zuPMWKU/zS9fDz/9SqtxY9wnYdDlGSYCqN0ffvhARWA1achIi/96k2o1U1TXNC+ULhnjt1214A5FzUu2h02jQqFX832dr64M4e/vP7E3UeoN0TbTQNdGCw+2nOsL7AdR4AgSCIdbsahj1lmkOQnVe1Pev3bqIdxblkWDVcdmobPIr6rjz49VUu/1MHd+TswamU6VJhOI1MP8ZZYTMKY8rZcfPfh7GPUhd2MAvm8vIjjej0yg3EJlxJqb016Dpfw4k9Wr8pioVnPUCOIugeqdyTBavado5UII7Bjtc8JoyDatwJVRuB2+tkpz0ii+Um5G9+HNOhKNvIe6dcWi3z1GiXKEgrP8CXhsPVdE/DyHE4SfRqueSEUpuPLVWi2rF29FXXv4mancnDGYEvKDR1o/IOSCWJCAMjl0A9E6xsXaXgzpf01yGQgghDp2MyDmMONx+Hv1mPV+uKq
R7spVLRmTx5e4y4t2Trby/uOFG4/XfdvDvCwZyx4erSLDqG92k7Wvpjiocbj+xZiXAUdFMZZxwGOItOubecRwJVgNby2oJaZpPcFcVMlHo8JBg0dfn8gEYkBHDdcd2Iz3WiEatxu/ZikalhmFXwd4Vaz67Xnm6rjXArmVKWfCsUajyF9PNHkLrqow69QoATzWOsJlHTskiUV1Lkr4IKt2w8r2Gdco3wQ/3wMmPgkqj7GhyH7ClSulxcUgsBi0PntWPW2euaDTN0aLX8Ph5A3j0m6Y5rAB+31lFrSeAwdp8IVuH288fRTUR28JhWJVfTddEywH1VbXPcPqgWqckCw5FDigFTQn8tqUcrVrFxSMyuWB4Jsf1SmJTaS3frC5iZX41/zkjg/S0Iah3LYeZk5QRLeYEWPMRoR4TqE09gZuP747TG2BAuh23P8ixSXXo3ztXOZ4n/FM5BxQsVXIy9DsX1n4CX9ykJC4edf3+dgp+fECpXAXK/hx9G4y+GVIHwvW/4CrZRm1lIb7Y7uR7LQz57eXI5xJXOfzxuRKIUsvzESGOBHqthmuP7caibRXKubCuIvrK7mogHL29o9orR85+S4/vseeBlyMP4rvSJ81OIBRm+c5qxvSQ6yUhhGgpEsg5jFTUeusDN1tKa8mKN5NsM1Dq9FLp8pESY8RZqiRBLXN6efDLdTx27gDc/gClzZQk3lfa3lWd9qHTqMiKNZAcroQaF70Menb64gh1GYt6569NX2CKIz+UxNer8zg6N4HMOBMFVW7OG5rB6NwEHvtmPcU1SoLAX67JIbvLMZC3uHHFGq8TvrpVeQrUfRz0OFlZltwXracS1bIZ0GUMrPs0cqd7n0Hqzq9I2/GrkkQ5tT/MuhMGXaw81V/xjrJe+Wb49Dol4fKEx5T1hDhESTYDGhU8feEgdlTUsbW0lpwEC8O6xPH9uiK2lkVOoJ0WY8Sgaz6IcyD2js0k2Qw8dHIGR6WCKhQgz6Xj6UUO1hc5sZu0JOxTbeTLTV569TwT3YbPm25Ya6Ayph+Fjp18vnIXFw7PosjhIRgK89hewakH5pTQb+KL5C64E83OX5XS4FU7CA+8mOqj/s64/y1lZLaVu8bEklG7ApO3CrW1v1IZBuDzG5VAanJfCAZg8w9KIvNBE5Wgjt6qBHW9zqZ9VKmVaZl7gjig3IjNf0YJ0A68CI85jbUaEzN32vnqy0IePzmFo3fOjv6BbvwWhl2pVKoRQhwR0mJMvH7VSHR+B3Q9DjZ8HXnFbscR1lo6X46coBc0Omq84f1XrNpDZwJTrHKu7nocmXEm7EYti7ZVSCBHCCFakDw6PIy4/UEGZMTU36A98vUf/PuCgRzTPYHPV+xi4sjG5bF3VNRxwzvLeH7uFo7vFT3Hy6iu8cSaGsbUJtr0jM5NiLjux1f0JHHtdHj5GHhhFIaXR9Jj5eOozvwPxOc2XllnxnvR+3yySRmN8O7iPG46Ppckm4FxfVL4+0er64M4AO+vc+MfcBFU74jcUVeZEqyJzYLv7oTqPFSrZipP6YderiQ53VdMFpgTUX3zV+W1cx+Bz26A4/4Ovz6t5M/ZZ4oFvtrI2wKlxHnlNuUGMRB95JIQAAOz4qjzBZn9RzGF1W6+WLWL95bs5KQ+0ZNC3nh8d6yG/cfgY806+qVHDiqoVDAoM7b+5xR/AVfsvIdeHx5Hz49PYtzCy3nr2BomDo7nyfMGEm/RM6FfQ8Lv7dV+wifeCwndG29Yo6P8jNd5Yr6SANTjD+ELBkm0GlieV8VlRzVUh6pxBzj/3Xw+6f5PfDcsJnjNXPjLMqqO+yej/7eOY7ra+PfQSnp9dALWL65G89NDqCq2NH4/ZzFsnQuxmcqImP7nK4Gd9y5SgjLH3Bb5wxk6GTbNitw273GoLaG81suUN5fSI9nKC5OG0iUpRrk5iaTfuXDcXcq5qWIreCIEjzxOqNqp3Ny4qyNvRwjR6aTYjWhqiyBrBMTlNF1Ba4Bj/44q0MmqVoXD9SNyqr1hLAc6tQqUvIHVynR9lUpFnzQ7C7c1M2JJCCHEQZMROYeBkhoPm0qc/LKpjON7JTF1fE9mLs3n+3XF3DpzJRcNz2J4Thw9kq1sL3fxzqKGKVZ6jZq/ju9JolXPzcd354V5jW+ULHoND5/dnxhzwxP5eIuBZy4axCNf/cGsdcWEwsp2HjmzFwNKPkM99+GGDQT9qFbPBEc+nDeNUMVmQiUbCCb0pCpxGH+dVcaZg+MZ1jWRORtKCYbCvHLZUP757YYm+zl9SSmXjBhPNmFUaz6K/GGkDVICKK4yJWFp3kKlBOjcR5XqVItegm1zQa1TkhQPuFCZirE3d5VSFWvk9bD0NRh4MSx6saFdo4PMkcr3zhJwVypP90vWKlM1qneC3gLDr1amadiaqXgljmj+QIieKVZuO6knapUKu0nL+qIaftpQxu3jevC/uVvqq7epVXDHyb3ok2Y7oG3HWww8ef5ALnj5tybVse4Y3xOA4hoPqaEyVK+f2jiRd/VOkr68jIcnz8KfnoRZr+Whs/phMWgorqrln8Nq0b89BcY9oIyOK1oF1lT8PU/noR+q+HmrcsGebDNgM+hYmVfNj3+UcuqAVF64dCg//lFCrdfP0bmJ2GKM/FKlZn2hhhuPz+HjBdvxBkL842g7iR+cBaHd0858tWCNEuBSaZRROYMmwnsXKsu2zIb0wXD6M7DweSXAakuFo29VAlDvXxx5W1U7CLsdJIdq+OKaASwrCfLSvC2kxRjpMfhG4nZd23j905+Bis0w8xLlXKNSQc/T4LR/KaXPAcq3wPf/B1t+UG6OuhwDp/5LKX2ukT/DQnR6vlrlb//pT8Oaj2DdZ0oQpOtxynVAbSkkxrV3Lw9OcHfetN0jcrLtB/Hs15YKZQ0jMPum23l74U5c3gCWA3gQIYQQYv/kbNrJFVa7ufL1JWwqaZiGoVbBQ2f1JxwO88MfJUz7dRvL8+KYfsVw7pzQm6uO6craXQ7Meg29U+0k2QwYdRomjsqif6admUvyKa/1MqxLHJNGZRNr0lJR6yXeoq/PlZEWY+JfFwzizlN6U+cLYDXqyFCVo37pmcgd3bkAXCWof/ondWe8zK2/apm7UQka+UNqrh7TlUmjsjHrlaTKkXJ7+INhPl5bw9Q+faJXkBr9Fwjvztux81dIH6pMudi1DD69FgZPUqpcEVZKir99DkR6Sla8Bo6/Cxa/DH3OaFiu1sJF7yh5cfIWKdM7LInQ7zyYdVfDej4X/PYclKxTErlKHh2xj9IaD1M/XMX8LY2rKt10fC7+UIhtZR6mXzGcIocbm1HHgIwYEm2GAxqNs0fvVBvf3TaWdxbtZNG2SlLtRi4ZmcXyndUc/9Q8ju2RyLRBmzBEOpbCYXQ/PYzukneBOFJjjDxydn/C1XlYXxuj/I5/doOSDyGhO7h/Rb11Hn1THuCr3Zu485TeLNxawVuLdvDAmf247f0VqFQqju2ZSIxJx8yleTx4Zj+8wRC9Um3kVbjYUe6iX7qduMKfG4I4u/tDwVJl+uSWfaY47ZgPw69UAkp7l
9765SllhM6o65XjPa4LbJ6t5Lzat0TXHsl9UO38Df03t9O160loj3kU7egcft9RycJgb07ofT6mDZ8o6/Y5Cyq3KgHivfu58RuoLYaJH0DADTPGQ91eiU53LoDXxsH18yFxn1FNQojOx5qqPDDatQz6ng3nvKQEmAuXw+c3QFIfwudP71xTq/ZcG6l11PjArD2I3tvTYfsvSk4xYwz90mMIhMIs2VHJCc2MABdCCHHgJJDTiXn8Qf43Z3OjIA5AKAwPfLmW1yaP4Ic/SshJMPOfiwYRtzvPhd2kIzepcfniUqeH6976nVKnl1P6pzG0SyyjuyUwa21xfSWpi4ZncUq/VNJiTQBYjVqsxr1+hUqcylOpaKrzIeDBOv8xBiQ9zNyNcNagdI7qlsDdn62pr5rz4Jl9SYsxRqyMlVdZB+s+h3NfVqY+7fxNaYjNhuPuVKZL9D9PeSqeOVJ58v3bc0rJYXeV8mQelFLDZz0XOYhT/0EGlWHSOcfC2L8p1WxyjwdbupLE780zlCdWx/4NZj8UeRtb5yhVdCSQI/YSDof5anVRkyAOwIvztvLq5cO4deYK/iiq4a0pI0nffcwdLK1GTddEK3ed2psSh5eZS/O48+PV9RWxbEYtuu1zo2+gcDledy16YywqlQqLQYuzbFPjUuG1pco/QAOcOFLDK2YdNx6Xi0YNG0uc5Fe6eW7OZl6YNJQl2yvZVOIk2WbkyfMGkl9Vx71frKXGHSA73sy1Y7uyo6IOg2tX0/4s+C+cN10JHq35GII+JRdOxnDoMaEhn9XeSv+A7/6hfH/ZJ2BNUgK8xpjIiYuPvhX8HjjnJTRf3kqO80q2jpzG0Ow4/vPLNioG38KEoTeQuGsO6u4nwVtnR/7sdi1TqvVt+KZxEGcPvxsWvQATHgdd9LxjQohOQK1VzotBnzIiZ99Rw77aqMnhO6ygMj08rNZT4wtjab5uRWP23aMRK7ZAxjDSY4zEW/T8tqVcAjlCCNFCJJDTiVXUevl0RYSbHZRgzo4KF9/8ZQyJVj2+YJidFS6sBi0JVkOT9fMq6vijSMnr8M6inbx02VDu+Wwtm0sbAjMPffUH7yzK452rR9YHcxrRmZQASrQn3ZZE5WLGXU2MIYxFr+GcIelc/ebvjV4yc2k+k0Z14eGv/2iyifVFNYR6DUTz6XVK9arRtyhBGY0BXKWEk/uiKliu3IwVrVLy45w/DX64T5laAWCOV26erKnR+2tJVEYZjPkrmGLgpPsa2gI+WPRyw7BjnQVqSyLvMyije1IHRG8XR5yyWi+vL9getX3J9kq+v/1YQv/P3nlHR1VvbfiZnt4b6QRC6L333hVFpAqKXbF3vZZrL9feK9IVEBSkSgfpvYTQSUjvfZJM/f7YSSZDJvjde9VL9DxrsTQzZ86cSXJOzn5/7363zY7ZaqPQaMLf49+5i3ZGp9FwIa+cj7ecd3rc112P1Tuu4bA07yYcSi3FvbyIhFBv3PVa3K0u8l/qEOunY80D/bDb7Tz+w1F6NwsG4FhaMbd8u5+uMf5EB3pgs9uw2u088P2R2tdeKjAS5uvGuJZe6OKmQnhrOPmj5ODU5DUsu1XO+5lroCQDNHrwiYJDc0XM2fG26wMLay+F1OZXRIydOB/WPg65p+V5N1/o96icr7s/hrhBMPSfsP4ZWrkVctfWYqb2iOZkVinDOrdC3aI7ZJ8UkbghjPkNZ/EAnN8iYpIi5CgoNG40emh1DRRdgo5TILKbLBQl/Qxn1kPLsaBtIFvvaqV6oatSpcdiA89/x5HjESD3hHlnIaILKpWKNuE+7Dhbf/FCQUFBQeE/QxFyGjF2IMBTT3mVhfGdI2kd7kNZpYWfj2ZwOLUIu92Ot7uOl1YlsS4xC6vNTqsm3rw8ri1tInxxrzP55khqUe3/t4vw5UJuuZOIU8P53DI2ncpxCi2txSNIsiFOr67/nFeoCB+mcsqaDufXVDNj2oez7GB6PR3lVFYpWrWKGT1jmL83pfZ5N520YBWHRhGg0cPB2RDdQ1a+TiyTgqr5cOx9HkRls8KcUfLCDc9Lj7pXqOR5qHXitKksgR53O7dF1DDgKRF/hjwv29fFVCotHjWoNTIFx+6cQ+L0fVH4W2G12ckrq8Jut+PjpsPjsnYom81OcbUr5nKaBXsyvE0YL6xIZNvZXOx26Bzjx8vXtqVFqDc67ZVzCgrKTZgsNtx0avzqiD97L9R3hXSK9iPb91oid73r8vfX0vshLpm82bb9AjEBHkzoEkXslaa1eYVi8PIn3EeE3n9N6ECB0cynW89hNMlq9IGUQhIzSlh2Ty/e2XDG6eWD4/3ooE0l+OJrqLZslwlQHaaIaPvjXdUr3mbs4Z1QHZqPPaYvKksVLBgH47+SoOEWI6RwqotaAwOfhjWPS0tmeS6svE/ybdTaameOHQ7NE9EI4MIWEXInf49nZSFGkz/94oOZ0j0at5prp879imPYcfNtONcHpNjR/DsJogoKClcldhu0GAWRXWH3J7DvK3ELtr8Rpi0F7yZyrWlMWGSaabFVFv/+3+PHQRbJfCMhx5GT0y7Cl0+3nie3tIpg7/oLigoKCgoK/x6N7K+KAoCxykJuWRXH04t5YHA8HaP92H4ml5d+Pom/p44JXaKY3iuGdhG+TPlyD+lFFbWvTcosZeIXu1kxqy/tIn1rHw/3dawI92keyIaTDTtMFu9PZWz7Jk5FIiBF1+g3sZeko8o84njcMxjGfQzrngZ3f9SdbuLC8UsMat2EebtSXL7HOxtO8/lNXRncKoTskkr8PfRE+ruTV1bFq79mcP+1PxBDFqr1T0veRTWq06vhwmbsN69GpfeUwi//PKx+1PkNJi2AVQ/B9J/APw72fSGBzKFtpXjzCIL1T0th1+9RMFWAvtqFpHUHv1jIOiZfJ++QkeeuVt71nhDSssHvpcJfj6ziSn48nM6209lM7xWLWq2irNJCQpg34b7uknPjpqV38yDWV7ct1uXZMa25a/6B2vYngEMpRYz/bBdrHuhHsxCveq8BKCw3cfhSEe9tPMOlAiMtQr14fEQCLZv44OOmI8K/vuvD203HO3vLeXb0VwSuu6fWSg9gbDOFoiaDePGLxFoR5ovtF9h0bwdi296A6sSy+gcx4nUpWKqJ8PcgzNfOd3f05K75B8kqqcTboOWbW7pyLqeM83XE4iAvPa/20RDy/UhHNo6xQIqiS7th+Cuw8wMqhrxGjl9nTF37Epm+DvfV1WHl5zZD036SgxXeSdqsyvOwR/eEQc+i2vF27RQVQFbOTeXww8yGRdiTP0FJJt4txzCpfSweeo1DxAFp8Wozvn4bBUgrppsf9LxH9uOKPg+JmKOgoNCosdutqCoK4LvJtQIIlUUi6CTvghGvYvWJblyjYqs/R4lV7vU8/l3N2S9Grt12G6jUtIuQe86d5/K4rlPE73mkCgoKCn9LFCGnkVFcYWLZoXReXZ1UO81GpYI7+sUxs08sH20+x3sbzjCgRRBNgzydRJwabHZ4Y10Sn07rgm/1WPHW4eLQqTBbsdklMLkhNGoVxRXm6ik7
zn/ZLV4RXBg6G19TNoHG82jcfFDZzPDLc5iDWmMe+Cz3rc7ntr5x9IwL4NezeS6dP8UVFrRqFTfN3U+AhwGT1Vb9njCpUzBoPcFY6iTi1GKugF0fyUSqg3Ma/iADnoTld8o3sMstEJQA2CW8NO8MdLxJxJmgFhJaPOhpCfDTe0DfB+HUStnP4YUwca70gtcdj6x1g6lLpY1D4W9BTkkldy84SGmlmWfHtObxH46SV+YQR9pH+PD59K6E+7nzyLAWbDmVg8nqEBG6xfpz6FKhk4hTQ5XFxmfbzvPyuDa4650v3UaThcX7L/HGutO1j+1PLmTiF3v4YHJHxrZrQp/mQUzsGsmQVqFYbXY0asm8+fFEIaWmJjw2YTP+ZWdRmcqoCGrHqvNWInI02OpY5mx2GPvVCQ4+/CpuYe0lf8qYL9OXhr8iuVQqEa4KjCYsVhteblraR/ry06ze5JebMGg1XMov50BKIU2DvEjOl9akO7sFELLzWeeA4xrSD2EZ9Dw7+i7kq91lJGYcp3+LYN7o2UJaFjpMAe9Q+Ga4FA1RPWT0uMEHVc4pccxdHpAMEoyu0TkKr8vR6MFUhnrNo9x+22bs6mKoqHKMIdd7wtAXZQx68g7H63yjJI/Hp0n16OEnYfubzvvueBNE93L9vgoKCo0LlUpaMl1dS3ISsZfnotE1MheKRe4fi23/gSMHJFz+3EZZJPOLwc9DT2ygB9vP5ipCjoKCgsLvgCLkNDIu5Jbz0s/O2TF2O3y5/QLvTepIsJeB3LIqtp3J47a+TWvFmcs5kFyI0WSpFXKsNhtv39iBx384yvYzuYzvHMmhS0Uuj2Fk2zDuWXCIQC89r13fjqgAR993XlkVU7+7QFmVheiAaIbHezM0Vk/I2AWY9b5kGjXMGhLCgeQCNiflMLlbNOsT67t/VCrw89BxW984vtwu2TYatYq5N8bQJfVb3A8Vg7WB4gtQnd8EI15zLeTE9oWMIxDTW4JQ9V5iAc46BonLxZUQPwK63ynCjNkIh+dB7imYvLB6Sk88jH0P1j4huT/L75RC1t0PshLlBiaqh4g4ynjhvw2nsko5klrEuxM78MSyY04iDsCx9BJe+vkkb09sT9MgD36a1YeXVyWy+0IBBq2aSV2jWHbIde4VwJ4L+ZRWWeoJOXllpnptSjW8sDKRbrEBGLRqtGoV9y86jMlqQ69R88X0LkT6u7PxTBEbzxQR7O2DQetHZnEmOo2KDyZH1htdXm6ysjNLzZDe90P7idJWpDWAVwhlVRYKC4ysO56Ov5cbPm5yfYkJ9MDfXYdBo+bXs7kEeRtYfSyT18a3Y8tpCUruHqFHs3tXg59ddW4j36Rdw8nMEt6b1JGNJ7MpVHnhEdQC1fnN0h7VabpMtcIurZP7vpTQYXO5uOZOr3He6Zn1MmHm2BLXb9q0P2SdgCnfo97zCaTukfysvo9IBoZnEPhGwI1zJSerMFmuDz7h8g/EcdNrFrS7Ac5tAptZjtG7ieLGUVD4i6CyWuDitoY3OL8JdfuJf94B/R6YJSOn2CKOnH9byPGNBrVacsf8pB2/bYQv287kYrPZUV9pxVBBQUFB4TdRKsxGRIXZwlc7LjT4/A8HU7mmQxNm70wGZEU+IczbKf+mBn8PPWqV44/o9rN5/Hw0g/cmdSS/rIqWYT50iPLlaKrzVJe2ET5E+LnXjgef/s1evr+rF2E+0rZhB4wmK5VmG2eyy0jJN9K7ZTee23KRTacchWanKD9eua4t2aWVvDyuLW+tO0VplazE+7hr+cfoVqw4ks7otmG0buLN59vO82CfUHqnfob60BxIGC0hxw1h8MYW0Ky+jdndX9oZzBWO8eWj/wU7P5QxoTXknYXEH2HqEllNmjhPsjAqiqRQc/eF9pOh2RDZFjsExUsbWcLoho9L4S/NqmMZeOg1aNUqcktdC42/nMwiv6wlMYGetA734fPpXSirtKBSqfB1110xDNLfQ49OXd+cn1lUgdnqOmS8yGimwmTloy1n+elwRu3jJquN19Yk8c9r2nDPwoOYrXanY352TCsW7b3kcp/lJqucDz7hYDJiLsnGVHCUYquBCl0gnWMDKSw3k1Fcwbc7k8koqmDWoOZ0iPRh6cE0Okb50TXWn01JObx2fVt+ScxGq9FI5oy5vosQwKT34/7B8ZSbrLy2JokYfz2+59ej+vVdEW1D28CBb6VVymYRoaTvw+J6SdsnwsvlnPwJZq6V6XfFac7PdZkp+Q597ofvpjgm3BVckPaJbnfAoH+Ahz94Bsq/0NYujx13X/kXnOD6eQUFhcZJaRYWiwkNdlkUasjd5x745x7X70G1I6fEIqWC57/bWqXVi5iTdUyck0CHSD9WHcvkZGYJbSN8f2MHCgoKCgpXQhFyGhFVZhsZRQ2Py84uqaJHU8fNgr+HHpPFdfbDHf3jCPYykF9eRWG5GTetmiOpRdw1/yAh3gZCfdy4vV9T8stMrE/Mwg6MaBNKkJeBp5cfr91Pcr6Ri7lltUKOj5uOwS1DWHUsE4D7Bzdnwd4UNp3KcXr/w6lFPL38GF9Na09zfSHX3dYEdB7k2n1JyjOTWmAkxMeNW+ceINjbwOwbYgi3Z6FaWz1e+OI2uP4LOLLQ5eezd7kFdf5ZcdCcXAlluRA3AJq0h9T9UJQsgot/U3Hc1BVxaihJhxM/AGr49R2xCTUbCmPflXHneg/Qx4j7RkEB8HLT4u2mJb/c1OA2NjtU1nHJ+brr8XV35E3N7BPLiqMZrl7KHf2acjq7hFBvNwK9DPi46yiuMKP5jZVNq93OiiP193k2p4w5u5JZdX9fvtt3iaOpxcQEejCtRwzrTmSx7Uxu7bY+7lru6R7E4BgNTf0LocQOqLFvfR3d0UXorGY8tW5UdLyV/eHTuHNpCtEBHjw9uhVfbb/AuxvO8Om0zjw/tjWfbD3HXf2bkVFUgadBS0ITb3Ksntg63oR6/1cuP0NOxFBeW3OSW3o35VxOGa8PicJ77YfyZEhrWP8PyD7heEFppjjmxrwj52rcEHHjWUziIEr+VaZeqXVwzUeQkyhBxwYfcenknRZBaPu/HCJOXfZ/Bd1uFyFHQUHh70V5HpxeC9veQFucBg8nSovn7o9db992/J97fL8HNY4cqw6DBnSa/8BBE9AU0g7W5uS0DPPGXadh25lcRchRUFBQ+C9RhJxGhKdBS/dYf5cOG4C24b6cz3XkzQxoEYy3m5Y1xzMZ3iYMg1bN4UtF5JdXcU2HJmQVV7DzfD7RAR408XNn4e09SM4v591fznI8vZgHvz9C8xAv3p/UgeWH0vl+X6rLPJtjacX0ahZUe4wPD2vB5lM5GE1W+rcIZv6eFO7uFUqAG+zNMLPlTAF24IGefgQefB/tvs9FTNHo0LeZRHmr+yHAh1mLDgMwub0/wfveQNWyTgiqqRzSDkj7074vnY7HHtEFVUgrWPeUtEB0nCqrZD7h0voQ3gG2vQFNOkC7iVceD3xiGUz4FpoPgYoCKfS+mwI3/eAU6qqgADC+UyTzd6cQ6e/e4DZeBi1eBi2VZiuFRhMqwN9
Tj0ErIbpNg714YkQCb6135N20j/TlyZEtsdvtZBRVYjTZ2H0hn66xAbz4cyIPD23BM6NbUmQ0s/p4Jin5jpHYMYEelFaa602Hq2Hn+Tw02HhuSBPM9ihseg/KKi30iAtg9q6L2O0Q5uPG9zeGErXzH2j2bpUXDvoHZB5BdarOlDpLJe4HPqVbhwpu6TaN2ftyeOC7w3w1oyu3fLuPjzef45oOTbi+YwQGrZp5u5M5ni7uvtkaFT/PuI2E5B2ock85HaNt+Kt4ebhzb0939mcU8+SAMFr42qSdys1XQkXrijh12f2JTI3Z8rq0TtptIsomjIZhL0PKTsnUMniL6LPnM1j1sOxz4nzY+nqDP0sublPCzBUU/m5Ulcl15dd3HY+ZK6Rd++J2xyCEGnrf73AANyaqBexiswZPXQN/QH6LwOZwfgsUXoCA5mg1atqE+7DlVA6zBjX/HQ9WQUFB4e+HIuQ0InQaNVO6RzNvT0q93Aq9Rs2U7lHMmL0PgBevbUOorxt9mgeRkm/kX+tPU15lYWCLYB4ZnoCbRkVKsYkVRzL49ZyjlSM+xIuvb+7C7fMOUlJhpnezQDz1WjpG+VFcYeZSgZGqOi6fUB8DbSN8nI4lJkCyPz7adI4wrZEVoyyEHH8VdVY+U6MGUdR/EmfK3ehj2YP2yHwRcQCsZgzHFtC6Io+LzZ6v3d/EVgb03y2GtuOcvyE735cbpMkL4fwW7BYTtBmHqjgNtrwG13wAa590BBCr1BIw2vchiOkL296UqVXZl9101UWlkRAijU6CT8M7QatroDBFEXIU6hHp787tfeMwW+z0aOrP3ouF9ba5d2AzVCoVL/18kpXVzpvrOoZz14BmRAV44OuuY3qvGEa0DWPX+XyaBXtgtcGsRYcoqg5B1qhVzOgVQ/MQLx4cEs/SA2nsSy7A30PHHf3isNrsvPhzIgathg8nd8Jdr6l3HABDW4XwzjB/vJK+QHNmDRo3X+h9Px5NOtKneRDL7u7Nm+tO8UQvb2JXTZY2wxrCO8KWV13u1/34fCbfeCuz90lI8+ZTOfSLD2bfxQJCvN345WQWTfzca0UcALPVzq3LM1gyZR7hledRnV6NyuADcQNQn1pD4C//YFjCaIYOeg7Nuucg7gE5L/1iJIOhIeKHi4hz4gfHY3Y7nFotgnCLETDgCTi5Qj5Puxul1ar9jb/ptrOrdSgpDwoKfzPKcyXo/XJ+vBtGvib3Ghd3gMELmg2GC9ukBTth1J9/rP8N1W2uJWY1nrr6WYv/L3yjQWOA9MMQIMJNx2g/vv01mSKjqf70UwUFBQWF/zeKkNPIUKtVfDSlE+9tOFubUxMf4sUjw1vgrlNzV/+mXNcpErPVxqnMEirNVuxAeZWFKouN9Sez2XY2l7UP9OOzbeedRByQVotHlhzjmxld0GrUzNmVzKgPZRrL8NZhfDWjK8+tOIFBq+ahoS0oqTCTmFGCChVxwV6E+bqRV1bF08uPMatnEIGHP0Kz99Pa/XtlHsXr2LdETlsGh3bBsJegohA2PF/rttGcXcfQgS/w8ri2VFqshOhyZRU9/QDE9nOeDrPrIxnv2WESRX2fw6f0HJqfH4RpP8CKWTJNpga7TUKL3Xyg590yTeHHu2R61em1rr/h7SfBjnfgzFpZxY8bJLkbauXUUaiPn4eeu/vHcSG/nNv7NSPUJ4O1JzIxW+34uGmZ0TuW0e2a8PSyY1zfOZIBCcG1E6Te3XCaR4cnEOnvgbebDm83HYGeelLyjdz4+W6n6VZWm51vdybTLsKXOTuTOZYuWVYXgUOXiri2QzhzbulGTJAnEX7ulFSa6RDpy9E0R+bV2xPaMzLciNeCYTLmu4aL26DjTXgOf4moAE/uHRBHO49UGe19eq2cr2qtuGEawmbBzVpa++XprFIGtwwhr6yKvLIqHh7agus/dQ429jJomT0+nCbbn0bdbrxMhMpOlBYmqwhY6tNrZBpcy2vESdPmeri0R3KrGqLZIHHRueLCVuh5r2TeTF4o14yqcpjyPWx7S3KymvaXVXYXWGP6yB9RS5WEHVeVgs5DQpAN3g0fk4KCQuOlLKv+dD2tAWJ6wYr7wDtMFn1Ks+DQPLk+3P3r/+ZY/xsslaDWUGxS4fHvBh3XoNFKe1X6IRHJgU5R/nxtv8i2M7mM66hMr1JQUFD4T1Gq0UaEyWLl618vsjkph+k9Y3hwaDx2O6QVGnl9zSn6xgdy94DmPLT4MIdSimpfN6BFMB9N6cT93x2mymKj0myjpNLCuhNZLt/nfG4ZHgYdU77cQ26ZI7hv5dEMdp3P48PJnai02HhkyZFahwBAVIA7C27twdrETI6lFRPv7uYk4tRSWQxbX4PBz8G+L6T4GfYSrH+mdhNjfioZRdEMbhVCJWV4AOz9Em78FqpKIPOoY3++kdh7zkJTmoZarYHxX0pvd6nrz8ehuXKTFdtPVuWNBRJafH6T83ZBLRxtWCCr+Oc3Q1EK3DgfKoolwBTkRs1cIcWn5t9NBFT4K6HWqPho8zl2nstjXMcIPprSGZvdjsVqY/nhdMJ93ZjYLZrnVpygoDpLx12n4YEhzTmdVUq4r3vtNI/kvHJ+OZntJOLU5ZMt55nYNbJWyKlh5dEM7h7QjJhATwACPA18NLUzt87Zz7mcMu4eEEeA3oJ++2vOIk4NRxZA9zvQqXzonL0a/cX10sI0+m3IPwtb3wA3P8f2HadCq2tl4ptGB6XZuHkFAEWAOJWaBnvy3JhW+HnqyS83SWByHZ4dFEL8jofQZOyH7rfB3i9cf4OTVsgUqIxDkklR+qRkVjUUlKzSiojbEFWlItLu/QI6TJWR4aselNZNjwAY/xVkHpNWq7r0fgBLaT64ZaM9OFtW6M1GWY1vORZGvi7T8BQUFP5aaF20ztrt0O9RSN0n9x51F4d63iuulMaGpRI0Boqr7Hj8N9VCcAs4tUYckHpPAjz1xAV5svFktiLkKCgoKPwXKEJOI8JosnIstYj0ogreWHeq3vNtw+O4f9Ehp1V3gG1ncjFo1dzUM4Zvfr0IgNlqw2aXFo1+8UHEBnqSW1rFxqRsusT4s+pYhpOIU0NemYl9Fws4l1vqJOIApBZU8OjSowxpFUK32AACs3bUe30tF7bIVJjwTnB2gxRMXiFQJqHIBu8gzuWWUVJh4eG+QbKqrvOAo9/L1CmfcBkd7hEExnxUCyfg02UmtBoHZ3+58sjvqlJxFOz7QgrQdU/JVKr2E+HYYhFl2k0Ar1BZob+c/PNQfEmONboHFJyXfvn8s9CkM/S4Q9o9jPmyTWUx+ETIRKsa4UfhL4vRZCUps4Qqi40lB1JZcsDRjqRRq7h7QDNu+novFpsjc6DCbOXNdaf5dFpnSqvM+LrrKTKa2JCUTWqBI+9Gq1YxICGYKH8Pskoq2Xkuj5DqoPHL2Xwqm9bhjrbH6AAPvrujB5nFlbjpNJTlpKA/s6rBz2FP/BG/gosinNRwahV0vVVaGg0+ENwSutwi58Dimxyr1P6xBE6YS4tQL87mlDGqXRPuWXCQFbP68NSyYwxuGUr7SF+OVV
+rVCroE2ZDs3WPCETG/Ia/wTar/Nv0kkyhm7QQbGbskxaiWnyTo1UTIKqHuGOuhMFL9lezYuzuJyIOiMj1y7Mw4Rtpj0jbJ9eFNtdD5lHcji/A7hvlEHtBRKOklRK2PPk78Aq+8vsrKCg0LrxCpLW6NNPxmNkIPz8kwm/KTri0GzwCoe0NMv2uqqTB3V21mCtBK0LOvz16vC5BCWBbCZlHIKYPAJ2i/fklMQuTxYZeW38So4KCgoLCb6NcPRsR7joNTYM8G3w+JtCznohTw4akbHo1c0y0ctdr6Bnnz+ybuxId4EFSZgkGnZpPpnXmhs6RbEzKbvB9fjmZTWyQl8vnDqQU0i8+mOfHtkbzm79ddtjwghSGSauklzwoHoa9TIXGCze1jX3JBVSaTNh6PySFo9YggkjqfgkhXXYbrH0cutwqoaPmchFimg+Dof+E0Lb139bNV5wDuaeg5Rjo/5iIO6HtZIpVWFvwi4Xvp7p2K3gGQUmWjC7OPQVf9IOj30nxt/9LWHqztIR8Mxy+HADzroWPu8Cax6G04e+rwl8Ds9XWYNhxv/gg1idmOYk4dZmzK7l20lyV2cbR1CLiQ6VFp0/zQL65uStNfNxIyizB26Dl02mdaWhgldbFmPJgbzfaR/rVGTN+hQBLSxXo3GSVecgL0Ga8uG0OzJab8apSuPZjccHs+8q51aAwGc38cbw/MoiXx7Xlp8PpaNQq8sqqOJJazPf7LnHPwGaoqo/dXadBa6yebGcqvbL4otaCtUraHosuyflvs6I6+j1Mmg9j35cg5onzYcBTIrzGDXK9r04zRCAe844IUlYzFKU6b5OTBAsniAswvJPs+9xmaZloMQLVkQXiwrmctP1Q6nr6mIKCQiPGuwlMXiQO3BpMRllcWjgBck/LPYh3GKx+VCbflTXCv/2WCtDoRcjR/xdCTs1CXY1ADnSL9ae0ysKeC1cQ7RUUFBQUrkijF3I++eQTYmNjcXNzo0ePHuzbt6/BbRMTE7nhhhuIjY1FpVLx/vvv/3kH+jtg0Gm4vV+cy+fcdGqqzA2H0dntOI0id9dpuGdAc+6cf5B5u1PYe7GA5YfSuWPeAZr4GvAyNOxo8XLT0jxY7LGuMFttaNQqKmMaKJ4Amg6QP+qWSnGsaPTQdoJMoUr8kZCfJvOO/zLWzWxKk3NLUe//EpJ+hsMLYO5Yycvp86CILrdvhNDWkp1hs8p0hEUTRVxpPwmu/QjUGtB7yWp/11tlxWzoS9J2EdxSBBZLpUyyOrIIKgulwAN5TXgnaDESbpwrApHNBPHDpIAzOIc90/8JuZkrvqwgPL5Y2jeszk4mhb8OaYVG7llwkCndo10+H+7rxtns+pPfariYV06NxuOu12CzQ5twHzpH+TKlWzS3zzvAgr2X2HuxgKUH07h59j6qLDa6Nw2ot6/BrRrOjdFp1Kw5V4EpfkyD26hajqmewrINDs+X1qWpSyC8s5w/Wjdx4uz93PUOKotoVpXEoZRCVh7NINzPncSMEnQaFe0i/Qj21LHxkQF8e0s3HhuegNq72rlis0LBBTnnXNF2PJxeA6Ft5GuTUc7BzjNA5wmR3eW41z0NPz8A/tHi4usyU647nkFSVNyxFTpNg+IMCSYtz4eo7hLiHN4JAupca+12EXIC48Vl16S9hKkHNpfJV1O+h36P1W+rLLjQ4PdXQUGhkaJSyb3CPbvh2o+xdb8Lu94TghPkWtD1Nrl+tBoHI14XIcPzCjleVytmaa0qMdnx/G/9+0EtRNyuXjyIDvAgxNvA+sQGWuAVFBQUFH6TRi3kLF68mEceeYQXXniBQ4cO0aFDB0aMGEFOTo7L7Y1GI3FxcbzxxhuEhYX9yUf7+xAb5Ml7kzripnP86AxaNQ8MjsfTreG/tGoVtfbV1k280WlUvLAy0WkCFUi98tyKRG7uFdvgvsZ3imD3hQJeuKY1Dw6Jd3rOXafBx03H9G/2seikmfKOt9XfgcEbet0nORwANgu2XrOwH5wrrpWMQ2DMR9+0D5q0/WhyT4qYct2n0P9xec3Jn6SoGv2OtDwk/igOgtnDqnNv8mVVbMNzUJAMM9fD2Hdl5b3DFOhxtxSiK++TwOPji6sDDK3iMvCNgR53SbE29l1p7Wo5Vlbgji6GtU/A4mnw84Nww9dS5IIUiFVlDbeG7Pui4ewehUaN2Wpl3u4UjqeXcDa7jAeGNEencaxieuo1XNcpgnaXTXmrS3yIF+46mTDl467j0eEteHfDGV6+vh2vrknCbHV20Njs8PLPJ7mrv7PAe1f/OEKrW66sNjsZRRWczirhYl45xRVmgrz0rD9TSlbXx6U96TLs7SbJCvLP1VkxBRfgyEJpnxr0NHaDL2b/ZiJkdJzqLHrU4BuF1VKFj7sOrVpFSYWZbuF6tt0cxqfBP9J1/yP4nVlGoDWHjUnZePiFOhx02/4lzpeY3o79qdTiCmozHtIOQWALuHW9nH8laTJJxlQmwtLZjTDjJwkyzzsPbt7gFyniz43zZFrdjndgwQ2w8TnJsmkzTsYGWy0QNxC63QZTF0uWls5DsrcOfitC8prH5HsxZ4yc+xtfgJxEuO5zZ3eOMtlOQeGviVojU+06Tyez1z/lvuaGb+X8Lzgnwm9pJug9YPqKxinkWCpAq6fkv22tAghuJUH51VNEVSoVXWMDWJeYhbUBh6qCgoKCwpVp1Bk57777LnfccQczZ84E4PPPP2f16tXMnj2bp556qt723bp1o1u3bgAun28MeBm0jG4bRtcYfxIzijGarHi76Vh6IBWVCrpE+3PwUv2Rx2PaNcFqszPv1u4khHlTbDSTnG908Q5wPrec2EAPRrcLY81xZ9FhaKsQLDY7i/ensnh/KjP7xDKjVwzzdqcAMlp5x7lcskoqeWt7JYEjZzBu4jAM+z+VNqXontLOtOEFUOug663YwzthrSxHd+pneROVCq77TEYGZ9cZK3xkoYwJvu5zMHiK42XrG1K8DXsZtr9Z3+3S6hoIiIX54yRoD+QGrNcDso/sE/JY1nFppRrxGoS0kq/bT4LldziPNtYaxOEDMj0r84i0WHSYCge+kZu14ksN/wBNZSI4KfzlKCg3s/KItNJ8tu08Y9s34fObulBUYUanVhHopaddhC9BXga++TXZZYDxrX2aUlJpxsddnB0Jod7c0S+O7JJKMosrXb5vaZUFL4OWiV0jKDJauK1fU1qEeOPrriO7pIItp3J5a/3p2mDlPs0DeffGDrx+Q3vuWp3IJ+NXE3zuB7yTfwE3X0w9ZqENboHq0+7138xUDidXQu/7se78EN3FDSKy9rxXCpi1j8vXI98AsxH31F087naBW28ey8E8De2Lt6Bddb/syzeSwMoSAtU/89qAFyittOA9/GW5NmQdk7bJIS/Ivspy5TVZx6EkE8Z9JOPC3f1FrK0JOVZrxanX6lqYe41MnAptAz/dJc8bvCFhNHw91JGlYyqTyVjxw2HXJ87XHK1BhN2EkXBwbv0x5xWFsOJeGPUvOV7fKBkxfGq15Hj5uXZmKSgo/HWwWsygs0DhRfjxTse9B
ojYfe3HkufX2DBXYla7YbTw37VWgYheOjdIPSCLcEDPpgGsOZ7J/uQCesYF/sYOFBQUFBQup9EKOSaTiYMHD/L000/XPqZWqxk6dCi7d+/+Hx7ZH49BpyHCz533N55hzfEsKqpbqmKDPHlmTEveXn+G3dV9xyoVjGgTxh394/j1bB59mgehUakanIITHeDBuI7hqFQqOkT6MaZdODvP52KzQd/4INILK/jnysTa7b/dmcy3t3Rj3fEspveOYWz7cMZ9ImM27XZ4Ym06P8cH8fmQF/G8sFaEk4UTYcjz4BmEPXE5qsML0NRdxY4bDCm7nAuq0DayQl9wAdIPys2Rf7TkcuSdkcyM9EPOH0atlXaLRZOcp9bYrLDzPXHb+DeVm68aNr8C/rHSPnF+c/3CzVIFK++HG+c4xqCfXiMtV8eXQNwAaD4cUMHxpfVbKzwC5GZG4S+HCgkzrmHVsUxWHcvEXafBZrczuGUI3ZsGEunvztxbu/Pg94fJqc6q8TZoeWpUS7aezuH5FSf44Z7ehPu54+WmI9BDh9sVWh0B8sqqmDUonlAfA2462bagvIrtZ/J4arnz7/DOc/kcTi1i8f5LPDmyFSuSi/B0n0KPwTfh7enOshPFPJK7wvWkJ4MPtLke1TfDcKs7fjxtv4img58TV836pyHvLCrAE/Dc8zaRd2xF/fWDkhsx/FXJ2Mk8Al6hxOhLsHj6wne3Q9+HYfA/QKWRsNDZI+pPo2o3AbreDnNGyYWmBptFnDYhreX17SbAwhsdz1/7ibQ3mi8TsdtOgMPznK854Djf79oBvzzn+ptvLJD3dfOVtsxrPoCMw3DTjyLmKCgo/KVpoilBZTLCT/c4izggDpRf34VRb/5vDu6/wVJBqUoy2v6rqVUgC2iBLSB1D3ScAkDzEC+CvQysOpahCDkKCgoK/wGNVsjJy8vDarUSGhrq9HhoaCinTtWf6PSfUlVVRVWVw0FRUnJ1TB6otFi5s38cgxJCKDdZCPY24KHXUlxh5v4hzblrgLQ6+LrrWH08kwmf7RbxZv1p+jYP4sVxbQj1MZBdIp9NpYJnx7TCoNWQXmhk9s6LLDmQRqdoX94Y35431p7mmeXHKam01DuWC3nlfDClIz5uOqw2W7381B1n83jTJ4T7Oo4gKPc06hu/xW63oyo4j6qqFLITUdfkXQC0HA3b3nJ87d9UVuaX3yF5OjUYvGVCxC//kK9Vaufis9kgOLO+4dHDB76FDpMlNLkGs1GKMp9waZdwhaUKClNE8ClMFmFI5w43fAMH58CS6dJS0ft+abPaUKcA7PeY0m7xH3K1nos1BHrqeXhocyx28DbouFRQznf7UrlUPXVqWs8YdBo1+WVV+LlrmX1LNyrNVsxWG2VVFhLTi0kI82HB3kvsOJtLWoGRcrOVse3Ccddr8PfQUXjZpDiQfCy9Vk1iejExgSIcWG120gsr+HDzWZkI1SyINuE+lFdZWJeYBajYcjqPLafz6BDpS4iPGz8dreBkZgkdIn15uIOu3vsAcr7s+8L5PKwh6Wdx5hz/AfLOXvbNaY764jZpTRz3Cax6WIKKq1HteBvtxAXYJ3+HqjRTHDjmCvj1Pdcjxa1mycCyN2DJ3/819L5PBOEaEkZJ6+P5jfW3bzFCHDWusFRKu6dvpNMxO2HMk+tRcZo4+m7f9JcWca72c1FB4c9Eix0KLjY8mercBjC/+Ie89x96LporKUYGW/zXrVUgeYTHl0BFAbgHoFKp6BEXwJrjWbxwTRt0vz0hQ0FBQUGhDspV8zd4/fXX8fX1rf0XFRX1vz4kUgvK2ZCYTXKekaZBnpzOKuXOeQeZ/OUebp1zgGeqV+ANWjXXf7qLr3dcdHLg/HoujwPJBTwzulXtxJu7+sdxMqOEZ386gUqlEnFmckfGtAsnq7iSLadzXIo4ghRTM+fs51xOGY+PaEGEn2TG9IvzY83NMcyKTUVXdBHVkOewp+5D9cs/JLg4th/0vFtElxo0Bufirff9sPbJ+sVjValk6vR+EJJ/lfDhuniGNFx4gbRAebnoW1drJLS0JvfGFcY8R8ixu7+MF/9+irhzSjOl8Fv1sITBdr9T8ngGPCkjztWahver0CBX47lYlwKjmfxyM2+vP82sRYdYeTSDh4e1YHK3KIa1CiEh1Juc0ko+33aeaz7eydiPfmXC57t5avlxtGo1Px/Lwmq3ExfkyZbTuaQUVDD712T2XMhn1dEMXry2Te2Up7o8MDiexftTuVjdKmk0WcgsqsBis2PQaphzSzdaNvFm78UC0goreHp0K7zr5GkdTStmw8ls0gqNDEoIIcTHDXvzoa4/ZExvOLuh4W/C6bXODrcatAaZKNdxmjhiLj8vbVZUP9yCqiJfpr5tfR0iu0qosis8g68cJFycKq0Mda8ZHafJ+VgTYn45Gr3LvCBAHHo+EQ2/X42LzytErl2NsY3i3+BqPxcVFP4oTBYraQVGzmSXklpgpMpSvZBTUaelXe8pLaY12O1g+2OGHPyh56K5giLEkeP137ZWAQS3AFRO06v6Ng+ioNzEjrO5//3+FRQUFP5mNFohJygoCI1GQ3a280jH7Ozs3zXI+Omnn6a4uLj2X2pq6m+/6A8ks7iCC7nlzN+Twre7LrIhKZvZO5Odxhkn5xt5ePERNC7GD9fw7oYzeBm0fDWjKyPahNIrLpBlh9IBKDSaeGBwPF/tuMArq5M4kFJI15gGChygV1wgey7k8+K4Nmw6lcO+i4W8Pr4dWx7uzVeDzLRePozQtbfj7x+Aas5oVLs+lEIu/5yM5dz6pogiHadVf8gjkldRg3eY6+IQpGDzDIKj30P3u8CrjkOr8KLr8eM1hLaVVbTIbjD2PZg4DyYtkKBjkGyOqUukPcPVa2uOqdcsqCgWZ87lHJwDnW+BWfug3yNSgCr8R1xt52JdCstNvPhzIq+vPUVemWTRJGWW8vDiI/RuHsibN7RHrYIVhzP4asdFp/M1Jd/Iw0uO8NiIFiw7mMbYDk3w99DhZdByz8BmtI3w5ZOt5/F11/HV9K4MTAgm0t+d3s0C+WhKJ3LLqtiYlEO7CF8u5pbx9PLjnMouJaO4gufHtuaB74/w9Y6LHEktYuuZXB5dchS1SoVPtZijUat4Y2Q4aye483nAQj7xnY9Gq4dut9f/oCqN61HbNWh0rjOg8s9Bk44SInzOhSMG5HXF6eJkKUkXd0tD71V4seGpViCtmNknZApVDXab5Opcfj7H9IGAZnDNhzKRbtpSmWxXl6AW9YXiGqJ7Qf4FsJqgxz3SntkYRw3/G1zN56KCwh9Fbmkl7204w7D3tjP8ve0MfXcb3+y4KG3cIa3lWjJpgdxPDHtJriWdb5Z7FL3XH3JMf+i5aKmkqNqR49WASfPfQu8pWTmpe2sfig7wIDrAneXV958KCgoKCv9/Gm1rlV6vp0uXLmzatInrrrsOAJvNxqZNm7jvvvt+t/cxGAwYDIbfbX//DTabndzSKmYtOkxZlYV/TWjPG2tdt5EVGs2k5JcT7utGhouQ1CKjGXedhrvmH+SO/k05lVVa
+9y5nDLSCo2cSBeL7vf7UnlzQntmLTxUm8dTwy29Y6iy2MgpreKDTZJR8/iIBHaczePGZlbcfpgoBVrLMRIAWuZioljWMchOhIguEDcAe2UpqrA2cGadBJHaGnICVWM1i6V59SMwca5MtEneJvkabW+AfV/KfsI7Q/fbQe8NarWssBdckGyL7f+SYNSIbrDsdmmnALlB6/sQ9H1E+txB9lOeJyv4Q56XArH8CoVb9gnoMOnKn0HhN7mazsXLyS2rYtWxTJfPvb7mFD/c3YukrFK+3O7aRVJkNJNXasKgVeOp19K/SzBJGaUk55ezMUl+t97feJbrOkcQ5e9Br7hAckqr+OfKRPLLTUT6uxMV4M7JjBJGtAnDoFWTUVjBsoNpFFfUXwl+f+MZXrmuLQ8uPsJ7YyMYdul93Lcud2zQfJCcPzd8AyeWyWpzdC9pL0oYA0krXH8jEsY4tzPV4BUKHoHY1VpUDbU6gjhoahwzyTtE0D2zrv5257fA0BclpPjy1iuVSlx630+BsR9IOLlvlOy3y0zwDoVL+yDzMLQeB+0nw7cjxeEHIh51mQkjXoX1/5DrUpGEuTP4WdhTHdyu0UOb62Ufqx5yTNS7sAVbWQ5qv7+uS+VqPhcVFP4IyqssfLDxLAv2OtyEVRYbq45lck/vJuDmD11ukSmYNTk5KrUI4pMWgM7zDzmuP/RctFRQbJfj/l0cOQBBCXBxmwjfGj0qlYp+8cEsPZBGkdGEn4f+93kfBQUFhb8BjVbIAXjkkUe4+eab6dq1K927d+f999+nvLy8dorVjBkziIiI4PXXJQPFZDJx8uTJ2v9PT0/nyJEjeHl50bx58//Z5/j/Ulpl4afD6VSYrYxoE0pCmDePDm/BznP5rE/MclrlB0jJLyesjpDTKcqPmX1i0Ws16DQqwv3cmHdbd8oqLVzIK6t9Xadof344mFb7dW5ZFR9sPMMX07uw+ngmB1MKCfTUc2vfprRp4s3ZHMkCAWgW7IWPm45lB9O40y/FsTofNwh2vt/whzu7HrzCADuqrrfBuqdh0nyZFKP3ktYMVyv9Gr08ByLo2GwQ3VtEmaJkEXdu+BqSd0JwAmx6yTH+e9CzsvJ/8FuxRre+Dr6b7Lx/mwW2vy2jh/1ioOkA6HmPOAJGvAqHF0ob1bhPGv5smt9jKUvhauZEev3MmN7NAhnRJgw3nZoqiw2L1UZuWcMTy5Lzy+kXH0z7SB+sVjvB3gaiA9xx12kY1CKYSqsNb4OWtuG+fLrlLJtOiRW9Z1wAr17Xjnm7U1i09xJVFhtf39yVIG8DW0+7tqvvvVjAzb1iWfdgP0Kzt+N+arnzBql7Ie+0iKAtx8j0pbR9ko8zaQFc2iXjvuvScaoULr1mybY1DjXvMFmh/mEmqv5PyL4aankMaSUuOxCX3cS5kH5AhNO69HtM9jHtBxmRXj3SFp9wEVu8w2DGSnk8JwkqiiAoXoKQvcNk6lXmMWjSHr4c6CwW220ygW7km9D/CYjoJOKuqVyuKyNeE4dOzecry4ZRb8nxVotOdjuUVDimjykoKDRu8sqq+G5/fbdLpL+7DFuwGOGnu52duXabXEOjezqcvo0JcwWFdk/0atBrfichJzhB7veyT8iiGNAvPpjv96fy0+F0bunT9Pd5HwUFBYW/AY1ayJk0aRK5ubk8//zzZGVl0bFjR9atW1cbgHzp0iXUddqLMjIy6NTJYcd/++23efvttxkwYABbt279sw//38ZksZKcb+Sbm7uy8WQ2t87Zj80OQ1uFMvuWbjzz43HSCh2r063Cfdh9XqZXDWkVwuh2TXh+ZSJFRjMPD40nKbOEL3dcoKzSwjc3d6t9nQq4TBPiWHoxa45nMqlrFFO6R+Nl0HAyo4RDl4rYeS6/drsbu0Qyb3cyvu469GVpUjz1ewwiOsPuK4kdBhFlWo6RwqgoGb6fBgOfEmdArwfAVCKtGRaTiDcXt8t/Mw6JKJMwEvLPS87FqoekFaL3/VCUBq3Gyqp7TTiqSgXhHWDrq/J1i1EyVrkhDs2HCd/Cnk/gy/7OY857PyhTalyh1krA329Rni+5O5YqcPeTQGRFAGo0+NYp2L0MWt6Z2IHDlwr5fNt5yk0WBrUI4fb+cfRtFsiv5/Nd7iM2yJO+zQPJLKrkyeXHSK7OvIkL8uSJkS2ZtzuF3efz8TJoefHaNtzZvxlWu50mPm68v/EMK446HEEVJit6rfry3HEnLuaVMzrBG375qv6TRxbBhG9g6S2SZQWS8xTUAlL3w7Ql2M5uQn12vfy+tp0g2VAnVwB2EXt2fyLunP6Pwy/PQkmGCCT9H5dJUJfTfIiILjWCbVUJHJoHM9dLQOal3eAeAG3Hi6vOPxZMpTB5EVSWQHmOiEQ2u4guiyY6tzjpPeH6L2DrGyIU97pPjrchx9+Bb0SgnTPacb5f3C7i0IyV8O0oEbPcfCVI2T9GhJ6cJCxqAxdyy+gY3XBLqoKCQuOhyGjGevmNEeCh18h14PgS1+3VIItBU5b8wUf4B2CppEjt/vu5cUCEdHd/ycmpFnJ83XV0jvZj0b5L3Nw7FpWrMDgFBQUFhXo0aiEH4L777muwlepycSY2NhZ7Q1NOGgGeBi139o/jsaVHnQSbJQdS2XI6h7duaM+tc/djt0OApx4fNx0PDWvB0gNpjO8cwW1zD2C12ekc7YeHQcurq5Nq97EuMYt7Bzbj063n2Xk+j1Ftm3Aktaj2+ZfHtSUps4SJX+yudf7Eh3jx1Ywu/JLoKJZCfAxcKjDiadBiihkITbvIWPCcJGlDqGlPupz2E6VIW/MoDHsZ2k2UtonQNjBnDNy8CnZ9IOKO3SZCTMtrZCR5VTH4REn7Q0BTmYjQ9gY4MBuKM6DPA7DlNecJN25+UljWPFbjOGiIoktSsBanOYo6jU5yfcLayI3JwTn1A5mHvuh66k5d8s/JRK6a8el6Lxj0DHSYIuPKFa56EsK8cdOpqTTbeP6a1ny0+WxtayLAiqMZbEzKZuEdPTn01R6MJucbfn8PHd1j/SmpsHDzt/swWx2/qxfyynngu8N8fXNXDiYXUlZl4dGlR/nm5q646TTMnLOfewY2dxJyvt2ZzJOjEhjVJowEfzutArWkldn49lARqQUVhHgbmNDeH7OxBF1Ac7iw1fkDVRbBxhdh8neQtBIC4+XcyjwGeg8RHtuOl997z2BY+5i0TXoEwvWfw6pHoN2NkjXjGyXnP0DWcUjdJ9Pmdn4gq7Lu/tB5BjTpIG0Jdel2OyydIedEWDswV4JHMGCTaXAZh6Rtq+ttEsR8Zr2sjp9cWT+nxlQu7p2Rr8PyO+G6T+HY9w3/UItTJbsroBkENhOxyFwO130uz498Q9pCwztD4nK5RkR0hRGvYSwpZsERCy1CvfH4jdHxCgoKVz8eetdDCuJ8qid1Xj6pry7FaX9Y2PEfirmSYp0b3r9nt5NKJQt8qftkEEQ1Q1uF8vraU+xPLqR7U+W+R0FBQeH/g3KH2Yhw12k4kV7sJOLUkFtaxa7z+QyIDyYh1JtxnSJILzRittkZ2z6Mradzale
TJnaL4r0NZ5xev3h/Knf0i+PTaZ1ZezyTwS1DWHEknTPZZfRtHkR2SSUL6/SGD2wRzC19Yll1LJOBLYNZdVyKyLTCCuJDvTiRXoKbTwzMmyoF0+JpMGmhBALmnHQ++JZjJXujPF9W4435EhqoNYgzoOtM2P6WTISqwW6XAtNSKQVVyg745TlH8RbTW1qzLKbqDJ4Tzu9pqZIAwho8gyGsg+t8D5AiUq2DFiMlxLCiUASlU6ul+AxpI3kiKbvEneMbISHHhclweB6UZkjB53vZ5JviNJgzVtwMNZjKYP0zImx1nOL6eBSuKkJ8DHxxUxf+8eMJ7HacRJwayk1Wvt5xgVeua8uTy47VijVNgzz5ckYXbDY7JZVmPp7amW2nc1l2KI0qi+TJmKw2fj6WwdDWIaw5Lq2BSw+k0TTYk+R8I/4ezu6tQ5cKuZSZwwf9rKi3vIrm5AnwjWbskIcp9kmgifkSnmvvQFVZiL3ZEFTTfoB1T4moaPAR90rOSTizVgSbXR/D2scdb6BSoR7xhoin0T1EGC3LkXN36xtw7YdwbAkcXyptR3U5PF+mzHW5Gfo/jj24BaridFgyw9np1nGauICyE+Xr1L1y7plKRNCtcdKUZMDK+6DTDHH1aHQi8LjCmC/OP41eRpxHdIXEH11vG9oGsEtb5Z4vxFnXdgJVnk1QVRahL8+V79XSmx2vSdkFh+bhPeNnMouNVFpseBiQ1q6SDMkbqiwSETqkpaxOKygoXPUEeulpH+nLsTTnxZowLy0qdz8I7winVrl+cWgbyeVrbJjLKVK7/T6jx+sS3FKEnOL02nuithG+hPu6MXdXsiLkKCgoKPw/UYScRkRJpYU1x10HqgLsOp/LR1M688X2C1z3yU5MVhvuOg039YymV7NA5u5OwW4Hb4OO7BJpX1CroH98MNGBHqQXGVl+KI03bmhHidHM+5M6sedCHuF+7jxVPdIcoHO0P9d2DOf2uQew2Oy8M7EDrZp4k5RZypIDqTw8rAXvbTiDKvu4FC02iwgvP90LY96WYurML6DVi4gTFC9/0A/Ph8mLsFWWoDr1M6qWY6XwaT8Rdn/s+kOf/UUyMZbf6fx4yi7Y97UIRDo38ItyzvQwG8EvVgrQyiJZfW91reTl1AQd16BSi5hUkiktWGn74fRqRwhrVHcpOk3lcsMWnADBraT1wlwdenhwjrh+bl4lLRg1ZJ1wFnHqsuUVaDZQ2qwUrmr0Gg094wL5cVZvXqnjdLuczadymNg1ik+ndcFkseFh0BAT4MGqY5l8tf0CpVUWtGoVI9qE8cm0zjzw3eFa987JjBIGtHBMPbuQW8aELpH8eG9vzFY7S+7qSUq+kf3JBcQHezLS4wS6b+uIDOV5hGRMJ7jfY6iKU+H8JgBU6YfE+TVpkYgx1ko5RwOaQ0AsJP1cv0Cx22Hdk3DTcriwDQY+LefUiWUivpTlQKtxENYWvIKlxbBuC1PhRdj4T1BrKL31V3yOLYEbvgJzlYQR6z1F0KwJDa0hbiD8eLfrdqjD86DbbY5g4oaoKhORuCxb2i+3/0uuAZfT71HY+aFsFzcANjwPv76LfczHWCJ7oYvqjurrIfVfV1mE+pd/cFfP9/AyaETEOfCN5HPVsP9rEZEmzZdcHwUFhauaAE8DH03pxM2z99W2vQJE+LlhP7UKVVQPEXar6ov49JoFqkZ2u223g8lIodYNz9+ztQrE5ajRQfr+WiFHrVIxvE0Y83enkF5UQYSf++/7ngoKCgp/QRrZX5a/N1o1uOlc23sBbuwSxZvrTrE+MZuoAHdCvN1IL6zgqx0XqbTYGN8pgmWH0rHabHjqNbQO9+H+wfFsOpXD6awSIvzceWtCeyL93DieUcLLC5KIDfTgH2NaUWR0rJTf2jeWp5cdr22xenFlIu9M7MieC/msPJrBxbwyvprRFbdTn0lR5xUmo73tdsmBubC9OsvCDJtfliBUm03EkG+Go7ZWwZTFcHGHrFibK+W1Gr1kUfhGyur26TXVI4vTXH9DdO6QewryzkhRll5nld7NT4rJse9JQGHGYVkluuFrKTBrwlN9I2Hwc/J1q3FS1EX3lEk25zaI26CiELa+LiHK/k2lLer8ZoeIU0PRJckKue4zMFSPIm3IOQDyucwNh+MqXF0YdBqCdRp83BrONnLXaQj00rPvYgGxgZ6oVbD8UBofbzlfu43FZmf18UyySip5dFgCPx/LwGa3E+7rTkG5qXa7J0a2ZOf5PObvTqk9F+OCPHnzhvb4WXLxXvF4vfcHUJ1aJS6ThNEiUl7cBuV52O02VKselN/nGsZ/7cjIcUXSz9IGOXc0TFsmgeBWk7Q91Uyo6zxDgpAPzav38op2M9ibo2aI1h31jnclQDxll4ib5zbJ+Tj4OXGv2GxyHbjc0VeXtAPgG95wQaVSyTXFN7L6azVMXQyrH3W49jyD5Rx2D5LWK7tNhFy9J5jKcVt9P0z/UdrFGmjVVSdvp8doFXqtBvJSnUWcGtIPSPZWv8dA0/B1XUFB4eogJtCTxXf1Iq2wgpT8cqICPOjgVYJq1XJofb2I0RtfdFyjvEJg4DNQWYpdY6BRJb9YKgE7hRY9gb/35HStHgLi4NJeGTJRzYAWwSw9mMqcnRf5x5jWv/ObKigoKPz1UIScRoSnQcfMPk3Z1UBYaucYfxYfSOWL6V3ILK4ktcDIDZ0j8XHT8u6GMzw9uiXLDqWz+ngWd/aPo2UTH+6Yd6C2fQNgzYlMFt/Ri8d/OIbdDkfTikktrMDPQ0eR0YxBq8ZulwlaNZRUWrhz/gH6NQ/ioSHx9GkexPzdyTwc1xGvSfMl6+bkT9I24d8UBjwpIovVDJ2mS9Gl1kpOh3eoBBb//KAEjQa3EEdNwijJwUj8USy5gc0lUPXgXFA3UAQVp0FsHzi3UQrD3vfL6GCbVVb0TeXiApo4X4SckNbwyz+g+x0ymtxul8ybA7Nh+Muw93NoPlQEqBM/wh1b4dBcEYsCm0uGRuFFybsZ9IyMTj77i/MxnVoFxlccQk5Qi4Z/4O7+Il4pNCpu7BrJ/D2uXSFTe0STEOpNm3Bf8suqOJtTxpxd9bdVqWTqVadoP/LKqlCpYESbMNYminurT/NAzuaU8u3OZKfXXcgr556FB9k6PdghpNSlzXhoOVqmMOWdEUF10LPQpCOqwosySvvYEnHXBMSJu+zy6VR1MVZPkxr+mrQCHppTX9w4PB+u+UDOqT2fyjll8KG8y90UtZxCT50NomaJqHJmnQg4ai30eVBG9p7dIIIvwF3bGz4WEKfN2Q1yrm95tf7z7W6UzzbuU3EPnVolOT43fiutVtYqEXlLs2DhDSLigFxzQlqJUGS3QcYR+byuCIoH3yhUmuo/r0e/a/h4938FnacrrhwFhUZCqI8boT5udImRIHNbXiFc2iOuRJVGcvFq2qg8gyH9sAxWaGx/y6vdkEUWLdG/d2sVyMLZqVXivNTL/ZCbTsOQlqEs2nuJ+wbHOw0RUFBQUFCojyLkNDI6Rvkxok0o6xOdgzwHJ4RgMtt4dHgCjyw+QkmlQ2gJ9j
bw9oT2+LjrCPY2sOFkFrMGNeOB72TSUkygB2WVFvLLTYxo3YRvdl50qsV+OpzOtB4xfLLlHBq1iipL/ckMdjtsP5uHxWYnr6yKAymFaHo0h2XTHRkXIELHhmdhxgq4tE/Gf+99XoQW9wDo+4j8/+ZXYN61Iua4B0sB9t0kx1SItP1wbLG4WyouW3n3DIYBT0jhiAqmfCdBhCWZEt5aliMFX5MOsPEFEXGiesgEnLHvSxbHstvEWRA3SKbsrHkCMo9I4PKot6XVojxXMnPUGrmB6/MQFFyQrJFdH8GU72XKTd1WLbvNOfQwshsYvKGqtP4Pu/eDImwpNCqiAjy4vW9Tvv71otPjCWFeTO0RjVYjN/mBXgayS6soqyOKBnjq8XbTclvfphxILmT8Z47Mpk+3nmd6zxgeH5FA//ggZsx2Hc6dV2bCpqqTx6BSQWx/iOwK8SNEaOw0DSK7i6BaUQjb35TzJryLiKs/3gXXfwl2K8SPhMxD0v54edthx2lQkS9CbXmOtCeeXOG8jd2O9dJ+LnR8DI/Y69HZzeRWqogJCSBsx2toji+Ge3fD4puc3TYXt0lWVqebIHWPPFaaA9G9ZILV5XSYKm2O/jFyTsUNlKyptP2Sh9VzlkzFO/uL5PHUcGQhhLaVc3/n+3KeXy6C1QjPNZjLRagFEaAiu0FoO5mcl3sK8i+gTtkJakDj5vLnBFS7hhpvAL+Cwt8euw06TJOboDOrIbYvmCrkHuPMehFw1DpxoTQmqoWcYrMWrz9KyDm5QpzSTfvXPjyqbRhrT2SyYE8KswY1//3fV0FBQeEvhCLkNDKCvQ28el07ZvZpyvf7LmG12ZnUPZoWoV4UG83c991hJxEHJAj5rfWn+WByR54d04ogL7H4Tu4eTVyQJ+dzy/D31OPrriOnuJKlh5xblXaczWNU2zCmdI9i6YE0AjwNaNQql6M4x3UM551fzqACVCXpziJODSPfgB3viVumJAP6PgSl2VJslefKDVH7SSLUXNgGA3vCspvrj/a020Q0mbrU8ZhnkIwY/uVZ56IwsisMeAp+mCmC0eDn5AZr6lIJcd36umynUotr4Z49sip/erUUvjXtJnY7rHsC7t0L296A4z843mPPp5Lnc8dW+PFOmVY1ca64H2qEmqB4KEqFHe9K0RvWDm5ZDQtucDgfVCq5Mew0rWG3kcJVi7+HnlmDmnNNh3AW7btEcYWZ8Z0iaB/pR5ivc1HvqdegVkHrcB/uGdCccpOFwnITCaHemCw2NpzMpsLs+L2fvyeFBbd1x6BTU1in3fFy0qvc8QmKl9bGof+U8yh1L5iM4mRb/bBMclNrYPTbklV1aK5MbosbJCO9S7NktbT1NRDaStxjBRfF6WKphJZjsVeVoKrJp6rZl0egiCOWKtC6UdFxJskJt5FT5cEX27OYNag5F3MyaXniWTQnl8PMNRIa7qplKmUntJsgo8YLk+Xcm/AtzL/OuQVszDvS/vh5H4fgEtpGpmOp9fI5alx4vzxb/32yT8gx69xdO5kiu0qWTg1xg+V7MPpfcmzZSRDRScSo6sl12oOzxW140zI4+aOIvJfTbIhso6Cg0DgxlUG/hyH/rCxCfTfF4eSL6g4DnsQOqPWNLPPFbMRmV1FiVv++48drcPcTJ2LqXichx89Dz4AWwXzz60Vm9onFQ6+UKQoKCgoNoVwhGyFB3gaCvA10jxVrv1otf2TTCivIKql0+ZrEjBKKjGYeWnwEjUrF6gf6svV0Dm+sLajdxsug5cMpHRmUEFJv6s4/fjrBxK5RLLy9Bz7uWu7qH8enW89f/jbEh3pTWmmhZZi3TJK6nJA24Bkq4o1KAyvvl6yIGgKbwah/yQr3qdXQ7xEpgMrzXH8zKgoBu4gvicslC2fjC/WLwrQDIrTMXAcaLVz8FdIPSphp1lHHdnYbnPhB8jDsdglQvRzvJiJQ1RVxaji2BGL7SYGnMYjQNPhZWPukCDSDnpUMkCML5V9wSyn07twmN4GVxeIo8AwGN1/Xn1nhqsffU4+/p0w5sdntaKqt9hVmKzklley9WEBhuYnBLUO4s18cHaP9eXzpUaeWxc7R/nw4pSP3LjzkNI583u4UHhuRgL+HrkEx55zRk+CRXxBkz4elMx15Md3ugIUTHNPdBj0rLY11XTTphyD7JPS4S0TMuoHDsf3g9o2QeQJ7VDdUs4fJ480GQ4cp8O0YaDFcsnWwY/UIIqkilJmLzjOmXTivjW/L0dQixsbp0GxdBm0miGPlyG/k8DQbLC2OBRdg9WMw/SfJobq4XYSnyhLY85nz67ITRSCd/hOUpkPeBbAYXb2DcGyxjE6/3FE08ClI/MlRnDUbJC6flJ3Q5npYNAnGvCvXskrniTZUlYiQO/hZ+V7WRWsQQdng3fAxKSgoXNXYVRpQASdX1r8nSN0nIelj3v2fHNt/hamcEjywo8Lrj+pwCmklbWk2i7TTVnNthwi2nM5l0d5L3N4v7g96cwUFBYXGTyOch6hQg1qtqhVxAEorGl6hBwlR/faWbrxyXVsW7r3EngsFTs+XVVmYtfAw13QIR33ZAozdDksOpKJRq7h97gG8DFpeHteWlmHeeOo1dI7256MpnUjOK6dnXCAllRaMbpeN1g3vDKPfgt0fyVjiX551FnFA8nHWPSXFTf8npYjT/0ahU1kC3W+XANLwzpB13PV25zfLdKpPekBOooSe1hVx6nJ0ESSMcP1cyzEyhaYhji0WkcjdT8KMvcMhujdMmANJq8SFU0PuKWnD8gySVf/mQyRvRxFx/hKoVKpaEcdosrDxZDaD39nGEz8c4/W1p7jm41+5sWsUj10m4oCMEF93Iosx7ZzzU/LLTZjNNqb3isEVQV56VMC+fD32rW86RBy/GMm0qRFxDD6Sg3O5cAEyGvz7KfWnRiXvgP3fYM8/AyVpMkZbpZKpLCvuhcpC+f1fMh2WzEAz/zrauuezfao3L3kuIWj/u3Q2ZKK2mUVU6nWvtG/ViCSusFnFKVdD+n55r3Y3iugbPwx2fej6taWZ1cHlGvBp4vjsrjAbRVgd8Tq0ukayc27fBFpP+UzeYRJcOvINEYW73QYrZsmxewWLY8gVRSniZup1n1zXVGrJ2rpjCwTGN3w8CgoKVz02lVaGEhxb7HqD7ETntszGgqmcIrtk1/zuU6tqCGkjf2Oyjjk9HOxtoH98EJ9tO0+FqX4rv4KCgoKCoAg5fyHC/dxRNfD31l2nochoZuac/UQHerDkQKrL7SrMVpIySvhkamf8PBzLMD7uWl67vh3lJgvpRZW8tf40c3Zd5NoO4Sy5uxeDEoJ5dXUSL6xI5Na+saQVGnFv3lfyZSYtkFaN0f+CRRMloyK6V+3443rknZH/thgmLhuPAAkgdYXBW/IqVj8GhSlic24I7ybiAlKpJdOjNKvhba1maGjGhN6r/sp7XSqKRLgpzZTPrdFB62th/dNwYin1MjEOzYNy1wHWCn8dsooreeD7w04tiS1CvDmZWeKUk1OXVccyGdbaOSepf3wQc3cnM7x1GDf1iEZbR3XtGuPH8pltS
HDLZ3C0FlXcABEJAQKaitOmhqjuIm5ejn9Tcb6YK1x/kGOLUYW2QTVvHDRpDz3ukUBPV8XKNR+g3/EmvgtHod39IV573iFq8VC8Ti0WQTTzCFDdztgQCaPEeVOXLrdA5lG4uFWyrOq2WV1ObpKcs35RIjw1REzv6sD1JnI8vR+QDK/Y3uJCGvaStAEsmSETuiqKJRy62RCwuv751WKzwJAXYNY+eOi4tIeFtgGtEuapoNCYMel9RQS2mhreqNxFu+bVjqmcfKTt0+ePEnK8w6QVN2Vnvaeu6xhBkdHM/D3Jf8x7KygoKPwFUFqr/kIEeOiZ0DmSpQfrj+O+pXcsa49nYrdDYbmZSnPDK+BphUbC/dx48do2GLSS0WK22thyKpvoQM/a7c7nlvPW+tOsPp7JI8Na4OWmJb+8Cg+dhqV39cRgSYaU3ZIt0WGy2HpqVvivdNMDsrpt8JHVbpMRhjwnI4IvZ+SbUFkmq+Mqteu8iYRRUvgVp0F5towbvrDtyi0Naq1M9FGpXIwYVkHzYVJIuqLpANj+lkzL0rrB8FfkvUsypEi+fAqQ2XhlR4LCX4L1idn1fpX6twjmUkHD7T5VFpuTOOvnoWNM+ybcOucAKQVGnhndkus7R1BcYSHcW08zVRqadbejvlR9YxzTRwLBN70s2S9db4fmg0V4cPMVsebsLyI61uDh/9vOlRob/NY34Nb1sPW1+tuFtZf3dCHYqksz5Frw6/tQfAnu3glHvxf3itM+OkibYY24q1KJK6bpQCi8IDlXGr0IvZVFro83pLVosqsfERdQRGdpH6uLRgd9HxX3T1Bzcfgsu9Vx7ns3gRvniEtH6y7bVxaKeBUYJy46jd71dU2jl8+g1SvTqRQU/kKUVZpRWc3YDV6oNLqGnTe+UX/ugf0emI3k26uFHMMfJOSoVCJoJ++CHvc6ZQKG+LgxsEUwn249z5Tu0Xi7KaK3goKCwuUoQs5fCLVaxX2Dm9M0yJOvdlyg0Ggm1MfAA0Pi6Rjlx8mMEq7vHIGnXkO4rxsZxa7zdOJDvYny9+DNdUlM6BqFRq1Cq1ExvE0YKheWn8SMEl5Ymchd/ePo0TSA+XtSuK+jFvWCaxwhv006ws4P6hysVoqhmhsfjV7aGcI7iavGOwyyTkC7ifLHPu0ATF4Ih+ZLURfYHHreI2LL8aXiajFXiOsnfris9JuMsuofXh1CWvcmq8stElIa0gpykup/E9pcD5nHZDT57o+l190zWN4zuKUc+8EAMDq3p+HuL1N2Pu8tX1sqYc1j4ko6shD6P1o/yyO2n2McucJflszi+g4XbzctHaL8GnxNiLeBsioLapWIPnf2i+NiXjnPX9Oa9zee4bpPd9EtNoAIPzdeH+iN7pvhzu1QKTvFtn7jXMg9LefF7o8kiBjkJnrcx9WOtuopWwUXocfdDX8QvxgwVjvIzBUioIS2rZ8P0Xqc69HbnsFyfi2unvRi8JFpVDfOkVbK06vl+tD5FsmjqSqDG+dJi1hIK7nZnzvWITbFD4OuM+HX91y/V0AzOR9tVvjlHzD+SxGYjy4SZ11sf+j/GBj8wJgjeTgH5zjvpzQTFt4IE74GlR3mjHLO7aooFAfPjrfrH0Pfh+U4FBQU/lJUVpbjd3EbqlYjZEDDYRdZX0EtGuf5byqnwC6LXd5/pIYS1l4cl5lHIKKL01PXd4pg+9lcvvn1Ig8NbfEHHoSCgoJC40QRcv5ClFWZScosYff5PP4xphUeei3FFWZWHkkn2MtAodGEj7uOM1ml3D8knqeX18+SaR7ihYdeAyo747tE8szy47VTsLwNWhbd0YNmwZ6cz3UUi2qVBC0XGM1czDdSaqzEO3GF80htcwW4+ThW/k+vddz4hHeCIc9LUPC+r8Qp4NUEwjtIkWcySrbNT7OkKOp+B3gESfG09BZHJo5GJ0XowGekeNV7iljzzdD6rpqDcyCim4QQrnvK2V3T8hrofb+0UfhGQPe7oPud8nmadIQvB8j0qfFfw8Fv4fQaeV3CGAlntpgkD2fvZxLkByJATf5OxJy6k7w0OhjxquTpKPwlySyqYH9KAR0i/QCH46RlmDeDWoZQUmmmVRNvkjLrj6B/aGg8Uf7ufDy1M/uTC3hy2THuHtCM19ee4ptbulJaYSbIy0CAuwr9wbccIo5fDER1E/HywlbJYrKZ6wsN2Ynw072S+/LDTHmsohBQiWCZe6r+B+rzoEPo8I0QkTWyhwT3Wqvg/BYJAw5rB/u/rv/6TjeJmGm3i4tu4jzY+E+5kW86AIa+KEJuYYqISz4R8jlykyA4Ab6f4ewYOrcRWo2DTjMkNLnG3RYQJ/ve84lj4l3T/tJeGdtXpsa5+YLOTY55xzsw6B+yvSuqSkDvAwXnYfQ7UFEgQlXqPhGxhr8M13woDsTiVPCNFEEsrL048xQUFP5SeFlL0LYZLdeCTjMkry9ppWODsPYiHGs9/ncH+Z9iKidfHYC3HjSXhyb+nviES8bYxW31hJxALwPDWofx5fYLTO8ZQ6CX4Y87DgUFBYVGiCLkNAKMJgt5ZSZySysxaDUEeukJ83Gr546psth4YtkxSios7DjnnLkysk0YyXlGXlsjhdndA+J4bmwrPt96gdyyKtQqGNY6lKndo3l+RSIvjmvNI0uOYqkzLae0ysItc/az+M6e/Gv9aTpH+9M8xIsKs5VWoR742UsoqzJCvAcehy7LtDj5E7SfDJtelK+PLITrPpXsivhh4pip6yRY9aA4dHrdJ21Wg5+VcOSMI9ByNKx8CLrMcIg4KrWMHT+2WMYjg7RUeYa4aI2qZu9nEtJ6zQdSKJorpN0qaSV8PVTaJFQqyQBx95NxzaPeFJt03lkJdG0/CW6YLX3eWjdY/wxc2g1eodD5Zvm38j5pA/EKcfSEVxZL0Tr4H1CQLO/rEynvaakUh45S/DU6iowm8stMtaKpu07DPQsOciKjhPcndSQ20IPkfCMBnnpevb4t077eS5XFyoeTO/H9/lQ2nMzGarMT6Knn4aEt6BDpx21zD9ROo+vdLBCVSlqsdGo1+5ML6Rzth7aqAvXFrSJMjHpLfpfPbxaR4rrPIKA5zB3j+qDLsuX8SRgt51rcAPn9nThPzqVTq0QI8Q6DAU9Jfk5NOKV/M4gbCOc2VIcm26HtDdD3EdlHbF9xzNUlKB72femY7lbjFAJoP1EE3cTlju0NPnDtR3I+l2ZKbk1d7Hb4+QHoMhPu2SVOGZ0boJa8qqSfZbsOUyTTZ8l0hztPpYKe90mOUHGqjB83Ol87a+l8s7gF930pn98rTJxAnWfAygfgl+fkezblexHD8k6LUH1mnRQoiutOQeEvhc5ukb/dp36WyZQDnxGh21Qu15KCi/D9NGnnJvQ393dVYS6nQOWPj+4PFHFArsFhHSD5V7nX0jmPaR/XMZytp3P4aPM5/nltmz/2WBQUFBQaGYqQc5VTWF7FvN0pfLzlXO0I4hBvA19M70K7CF+0GkdedV6ZiZKK+qGb/h46fD30LNp3qfaxz7ddoEuMP0+OSiA6wBMfdy2/JGZRWmXh6dEt
ySyq4svpXTmaWsSnW+W9O0f7Mbl7NPllJp4Y0ZKHFh8hr6yKueObEHroS7xOLydQpabJsFexewY7RwVnHpW2pLiB4hCw28QJMO4T2PN5/ek4IAVYu4kifCyaJK4B7zBxAIx8zbmVImGUOGjOrHc85h4AZVcINC7NlPyMeddKGOn4r+T/62K3y4Sa6z4VZ4C5QrIuQI75wGw5nu53imBTu+8s2PYmtBwrRW3uGfnMGYdg6D9B5yHF68YXxcmQdhC8UmH3J5IZEtENet8njiKtsgrVGMgsruCZ5cfZctqRgdQh0pcnRrbkwe8P8/Kqk7w7sQNLD6YRF+zJm+tOkVsqLU53LzjIDZ0j+XRaZ+x2O5H+HpzKLGH+nhQeGRbPE8uOo1LBPQObsT4xi8eHJ2C22RjbvglZJVWY0WH3CEI16BnY8jpkn3Ac2LElMGNl/WymumQcljHexjyZJOfmLyJl04EyHcpuE0ea1SpBwJFdRfBoOkAKldw67YmlmeJi848TZ9uFrfLekV3FfefmC+0mQefpEpJ8fqO8LravCEl1RRwQJ8yPd8pnuLyVsQa7TSbJtZ8kYnGn6eIoqiyStgZzBbS5Tq4jTq+zS6vZ2Pclv6qiUM7zy8WiiM4Q3lFGt9f9nFtecwQj73xfMoFS94pwU5gi39d2E6V1VEFB4S+FylQmrZ5Fl2QRZtM/q5+4PFuvgcWkqxlTOQX4/nFBx3WJ6CLOypSdMtGvDj5uOq5pH86CPSnM7BNLTJ2cRgUFBYW/O4qQc5Wz/Wwe72086/RYTmkVU7/ay/qH+xMd4LDsWqyuA3OHtApl7YnMeo8fTCnkYEohrZp48/r1bWkT7ssTPxwjv9wR2DkoIYT3JnXkWFoxTXzdeHv9aR4Y0px//pxIUmYp62Y2pcWaGyXMtxr9modkhO+5jc5vuPIByePo/4Ss8qu10n6x4l7HNp7BkmtTI3Sk/CotEJXF4ho4sQzaTpBWpborN22uh1UPO79f3hlx9dQVd+oS3kVWyyuLod0E2XdDHJovRWL8CPCLlRu1qhI4NFeKxm1vun7dqVWykh/bD4pS4dwm+VeXpPYyNn3etVJ8egbBhc1w7DspXmN6N3xcClcFpZVmXv75pJOIA3A0rZg3151i1qDmvLI6ibsWHOTaDhEMaxXKh5vO1W5XabaxcO8lFu4VsfWtCe3JKq6goLyK5iHeNPFx44lRCfx4KJ3lh9PZeDKHD6d0ZNaiw1wqMBLqY2DtDU8TkLLGWcSpPcAMyW9qaLpTWFuH2DjoWewGb1RrH5cgZKhuf3xBBJHVj4gg2esBaSOsK+L0vBciu8Hh+eKOa9IBZvwkmTxnN0gWgl8cdL9dwr/PrhPRCKode/90fXyWKkjZdeVzQecu7ZRVpeLQ0XvB1O/FdXduo8OZ44pD82HgUyIyTZgtrqOj3zmmZXW9FTa/6vq1icth6hIRhAKaSqumzQIDnpDrSs97HOLvf4vNJqKY3SY/T53i2lNQ+G8oqTBTXCEOPV93HT7u//9AGJXWAKYKuc7Vpa6IE9gc1I1wMcZkJM8ejvefIeS4+8n36cz6ekIOwKh2YWxMyub1taf4/KYu9V+voKCg8DdFGT9+FZNbWsn7l4k4NVSYrWw97TzSMtzPHZ2m/h9dN62a0kqHU6dH0wDuHdiM2/s1JTbQg9JKC54GHQ8vPuIk4gBsOZ3D/ouF9GgawIs/nySvrIoWoZLn0a95AOEpK51EHEBWpQsuYO85y/nxdjdKS8OKe6D1deLSyT9fPRJcJYXi8FekwEs/KCO8202U4gxE3LGapBXk+BJoMdKxb7XOOZMHpJiK7ScFz+Wo1NDrHse4co9A58k9l2MskKLs4Gw5/qU3O1bjA5tLW0ZDlOWIk+jy71MNB+eIPXvqYpnuFdwS+j0mDqFNL0PpFSYIKVwV5JeZWJvo2v2VmFFCbJAnKpUINksOpJJa2MBo72rKKi10iPRDr9UQ5KVn0R3dCfd1Z1rPaBbf2ZNvbunKk8uO1068yi6pokzn37AYeXRxwwHG7v7SLtj1Vjm/Zg9HVZwmDpSpS2WiVPvJIlbu/0qEibu2Q+dpch7W0HoceARI1s75zZB/Dk6tlt/fxTfB1tfh0i4IaSEtB6ZyEYJaV7vgDN7OAcKXU5wm4kzcQNfPd71Vri8jXpPCylQm1wW9lziLSho4v9Uacb/lnILld8DsEdLGGdNX9gXgEXzl60P+Oeh6m4wkt1Vfa3e8Az1nQWCzhl/371CSIcHrs4fDl/1h/dMiODXUOqqgoNAgNpuds9ml3LfoEP3/tYX+/9rC/d8d5mx2KTbbb59TFpNJrn+VxdIG7RfjesN+j8h1qLFhLiff7o33HzWx6nIiu0HOSXEkX4ZBq2FStyjWnchiz4UGWl8VFBQU/oYojpyrGLPVTnK+i5ajak6kFzt9HerjxgOD43lnwxmnx4+mFXNdp3Au5pXzynVtOZhSyPazubjrNMzs05QWYV4cTy+uDTW+nKUHU+nVLACQwONCo6xejYzT45P0o+uD2/IqquGvwsx1cHGrFEIhrWDOaHm+LEvC7fxjpP0osLk4aGoydEBaFA7Pg5t+hGZDReyxmMTRUlUif/Q73SQ5FJXFEppXkuF4fUgreW78V1KEpu2XxwPiYODT4hiI6iGPlWRJO0jPe6Uwyj8n722pEoFo4NOw7mk55hpKMyUT5/b645Wd0HvA0plSsA55Hja95Px8YLy4k767SfJxavAJh2s/lgBF70bWX/83o7TKcsV62mqz88VNXbDZITG9GC+DlhBvAznVrVWX06qJD4dTC7l3YDPyykysOJrBgeQC/Dx0TO8Rgx24mOd8bTibV0m0uYFR5he2iJBzeSCwXzRM+FZyaA7MlrHbQS1E3Lm4Q26uF0+Tlqcazm+GFiMklFhVZy2gwxRYMsP5fdtcL4HfMX3ESaLzkHaq4ARH0HBZjrjWStIkPyfPtXhNTG8ZT97vURFILNVi2LnN4mLzDpNWqj2fiWtv7AdgKhFxZuz7EN3L5Sh0etwjgtOJOlO3jPkyUr3/EzBrn2PKV0P4REhr1qXdjsdsVmzFaRj923DFdJySTGlps1kkxN07TELQL9/m+2nihKrhwGw4sRzu3CLXNAUFhf83qYVGxn+6i9Iqx33PtjO5HEopZPWDfYkO+I0WnrIsVKdXi4B7dLFcRyvyxbVorhC3b3QvKC/ArrLzJ8khvx+VJRTYPEn4s7pCQ1qL6H56nbSqX0af5kFsSMrmhRWJrH6gr1OsgIKCgsLfFeVKeBWj16hpFtxwCdApytlpUl5lZlS7JnxxUxfahPvg46ala4w/9wxsRodIP96+sQPP/nSCT7ee50R6CfuTC3lhZSLrjmfVZnW4wmiyUrdrK6h6coDZxpWzH85toMpsliIprK1z+5FGL8LM8R8kMDSyi+tRxWU5kj0x5HlALStbNROetr8lBdSNc0XA6fuI82sri2WKzop7ofkQGQE+cZ6INTvekeDkiiIpHrvMkAlUi2+SMNSTP4mIMuwVGX3cpIPkZLj51j/GsxtcuwT0XtK
OFdJGiuOtr4PeG7rd7rzdoKfF4VNXxAH5TNv/BSpEzFG4avFx03KlwR5eBi3J+dIC1bdFEAeTC3hkmOtxqtd1DEenUdEs0BOLzc4t3+5j7q5kEjNK2Hkun8d+OObyfN2UbKKy2aiGD6I0Q1oXb1kFNy2XUN5+j8Ky2+GzXiIk9H9cMpwW3iC/62fWO4s4NZxZL7+f7aszZ/Re0rZlrePoC20j55paI6LkgKegyy0iSvpGyrlbWSoCj1cIBLcS4cQV3mHiEPKPlXDk1Q+LaLTsdrBboNlgEYENPjLZrvlQ2WdliZybqx6U9jCDj2OfWjdxEbWbIEKKysUPcPdHIixp9CJGuULnIUHGqXucp98B6opCNIe/xVKcUf91VotMvPpmKHzeF74cCJ/2lCD4iiLnbbOOOYs4NVQWwY73pHBUUFD4f2G22li8P9VJxKmhtMrC4n2pmBtoVa9BjVXcwjYz9H9M2jdPrZWclxPLZMKlRxD4NAH3wD/ok/xx2CtKKLC54/NnOXI0WkdWjqX+9UytUnFLr1jOZJcyd3eKix0oKCgo/P1QhJyrmCBvA4+PSHD5nLdBS9/4IACySyo5nlZEodHM62uTeG1tEsPbhPHF9C7c1rcpBeUmdGoVm5KyySyurLevFUczSAjzbvA4gr0NVJjlhqe0yoJeo6ZDpC8/JBkpaD2jwdfltLqZD056kTLoY2waN+fWopSd0GwImI1y85O6r+FvxMmf5L8BTaHjJCnaQJwz296ElfdDTqKMNx/5Jrj5yfPnNknrU1kObH2jWqSZAWsek7HKPe+R54a/KgXhha2O98w8CmufEFfPxW2wcpYUtOM+kQBXkAJVpYJ9X0gmkHcTx+v7PAAzVogDojgFetwFt20Q4arjTfLeE2aLuOTdRHKAQlrVLzYv7ZZi7YeZ1RkmShvF1Uigl55xHcNdPtc52g8fdx3llWbWnchi0hd7+GDzOY6lFfPepI60CZefd6iPgWfHtGLWoObY7dA6woe315+u55QrN1nwNNQ3Uy47lk9Ox3tdi42hbURw2P2RtDQtmgTfTYafH5QR3zYL/Pqu/P7lnxNHSsJo51G6l3NkkYgbTTqKw6euO8c3Ulol546V86dJR1h0IyyaKOfrV4Ng8ysw+i2I7S2ZOktvlmMb+560OtYQ3VMmb2UnQtYJOX9rriVWk7jufvkHjHlXcrUSl0ur48YXJKg8pBVM+0GO94av5Fg6TIEb54gAteEFCG0r2/S8V1bWJ86DyYtEePIMlglUg58Vcaguai2MeQf2fS1tnHUde2oteIfivuFJND/MlAD0uhRfklysutdFU5n8TDIOOx6z2eTYG+LUzw1nHykoKGC328kvqyK3tAqL1UZppZmtl+WZ1WXrmVyndnSX+1Rpoevt8jc5JwmCmoMxVyZphrWD0NZQmoE9pDVqfePLsiqrMGKya/+csOMaorqDyejIJruMuGAvhrUO5e31p0kvUsRrBQUFBaW16iqnR9MAnh/bmn+tP02F2QpAdIAHn07rTISfO2mFRubvTmFC50hWH89kU5Lk5ny57TzxIR24d6Gs4r52fVtWHHGxKgwUGc2ooHY08uXcMyCOwursHJsdDl8q5KlRLflw0znO+vWmS3hXtBkHnF5jih3MMXtzNpwroF9UIJFu5RL+6REAF7bBwblww9fStuQfc+WMGVv1ype1Cgy+UnwN+odjzHhViQSZGgtElBnzjhRRGp1sP/xV2PCco50EpPUipi9E9oDTq+sXQh4BMOwl+HaUCCk1JP4oroXbNsgxe4bI+9htcMtqWU3SeYoDadNLElpst0tx3WuWjFvGJgGvPz8kxx4/XNxEkV1FLOr9gBTSG1+QwrYsV/abdgDu2iauBIWrCi+DjqdGtcJqg5+PZdTqbb2aBfLitW24dfZ+Xr6+LTPn7K99zaJ9l9hxLpfJ3aJ5elRLbHZ4f+NZXlkt4cEbHu7PjnP1M2Psdmmr7Ns8kF/POfICqiw27lqVz6Jp6/E9+Anq06tA54G98wxUEV1g2W3iTDu1WlaRXZFxWKZNTV0iv6P2K6xK263y+zn4WRE6vMPE5WKplDbFTS+JK67PQ+Jyu7w9KWmFiLMegXDzKlj9qAQp37peBFmNTlZpM4+K0DrkeXGoNXTc3k3kHPEOE8GohpRdcr2Z8C1kHoZRb8qxLJnhOLcvbpOcII9ACUquKhWRNn44uHlLFkara2D0O9IClnUcvMMlmD1lj7QCbH5Zrk01uVsDnoSj3wOgSt0jTiLvMMdxJf7UsJNm88viAvQIkOOoyQlzhdYAja9xQ0HhTyGruJINJ7OYvycFk8XGmPbhTO0RRYBnw7k1AZ569Norr3OqKgoAu4jgGQelpbOGlF1y7o7/WjIAGyH5FXLt/9McOSDfs+AESFol114X17VJ3aI4mFLIU8uOMe/W7qhcOSkVFBQU/iYoQs5Vjp+Hnpt6RjO8TSgF5Sb0GjUBnnpCfNyoNFk5lVmCh17DiYxivt2VXPu6HnGBV1xxupx1iVm8M7ED7244w87q4tDHTcttfZtitdnpFx9Mt9hs9icX8s6GM3wytRNj2odhNHhQOPYbfPKPYjg2H7tKS26rmzhsiuKL/cXMH20gbFW1K6aGNuNlJX7Z7RIEGDdYMnP2fOr64BJGS17OuielUOw4VVbUm/aXtiazUfIz3P2kuFx2m+O1KpXcTN39KyT/KgVsTB+ZcJN+UESk5F/rv2enGbDzA2cRp4bt/5Led41ewluTVgF2CSqO7Qdad1g81Tnro7JYwpG1HpIbsv8rEXGaD5XsnHnXSFBrDRFdJNtn1UNSxPlGiXB05DtpP9E0zpvDvzKhPm68en1bHh7WgpIKM+46DUdSi9iQmE2Ir4HT2aX1XpNaUMG/1p9GrYKPp3bm0CWHoHgla/+nW8/x3R098XW/wLrEbKw2Owatmt7NQ9hX7Mc20wyGDrodH3c9HS1H0BaniZvMK0yyX1yhUou4eWC2iCxtrocWo8Qt44r4YbDsVug4TUaHG7xhzHsSBh4YJ9OzQlpJllVDGTOH5kqgcNJKEWc1Ovn/X99zbKPWQKtxENqu4cBwkPO5wyT4Zlj950xlsO11yQlaeb8ISOM+luvHobkicFXkw7a3HK+x26tbyDKh8wxx1Gx7U44jsgsUp4so3Xa8CD897xEB2SNQBLAza0X4rSHxR4gbIP9vtciY94bIO+sQeVQq6HKztFy5ovMtIigrKCg4kV1Syd0LDnIktaj2sU+2nGPL6RyeGJHgJITX5c5+cXi5cD3WUFZSiOe2NyGkrWS71BVxajAWwN7PpD27EZJVIfcY/n+mkANyb3XwW8g+KU7Sy/DQa7m9XxxvrjvFgj0pTO8V++cen4KCgsJVhCLkNAL0Wg2R/h5E+ns4PV5QXkVWSRXvbTzLp9M6147QBNBpVE6F4PazeQxvE8p3+1w7X4a1DuW1NUlM6RbNUyNbUmWxodWoWXogFXe9hg82nWV4mzDuGdCMtMIKMosr6B4bgEqlosDuhjF8KJrQfuSUmnjsx1NcyEtj9YwYwn4cJY
JFXRKXS8hqZDc4NA/iBkkrRsIoOL3WeVs3X8mUWXk/DHpGJkOU5UBhiqxuZx2TVqrg1pC2V26qpv0gbVKH54uAsuw2ed30n+TrXZ9I0dZ6nOy/7ip5DZFdJZunIfLOyir+6TWOx7KOy2rSjXMaDmzd/aEExU5eJJlACWPgu0mOSTc1pB+U0c9jP5Bw2f6PASo4ugh63u16EpfC/xxvNx3ebrLSm1ZoJKWgnHM5ZdzQJZLMIkdbY6+4QB4e1gJPgwabHSxWm9OkFJUKTmeV0j8+iG1n6rtyKs02Co0mgrwMfDatM1abnabBnuxPLuDuhYew22H5cTVPD4ulU6AXpB6XjKbglmL7P7+5/sEnjJaAzqQV8vWpVTBlMZxd7yzEgginFhMUXZL9RnSFwc+JNX7qYkd2lkcQlKQ3/A2rKBShcs9n4o7T6OU8qkGthfFfQvJOKE2X5+vm8NTFJ0LawmxW18+nH5L2srwz8u/Mehj5urR4dpgCqx9z/bqsY9JStfdzcfWcXS/B6T4R0HywHNvKWQ73kt4LrnlfrlF1MdRpX9VoIayDuKNcEdjMebR4QDMJhD4013m74FYiMinCroJCPY6mFjmJODWczCjBZodbescwZ5fzeTqzdyxtInzqvaYuGlMpqgub4dpP4Mi8hjc8t0kcwY0Nu51sk1zDA9z/ZCEnqLlcb5NWuBRyADpG+TG0VSivrE6ie9PAK0YDKCgoKPyVUYScRozJauPDTSIYZJdUEhfkyYXqSTYHUwp5elQrfjwsRdTGk9n8cE9vtpzKJavEOSdnQItgsMMb49vz9a8XeXbFCSrNNmIDPXh4WAvahvtyPrcMPw8dWrUKPw8953LK2Hwqlzm7kskqqaRZsCfvTuzIsmO5JDTxxmyzEVxyor6IU8OhuTLRypgHG56XVewBT0v+zdHvRXCJGygF1rpnpDDa+KIUVTWEtpE2iRM/SkbO1jdkqo1KJS6fCd+KABTVHbrMlDBSUxn0uV8KxOM/iODSckz9oOXfyqKxVsl7gQQZd5wmIa6FyXBpP4R3dh1OWpYjx7Bwgoxj94sSJ8/hBfW3PbZExq/XCEoxfaRN5UoB0wpXFR0i/agwWYkL8qwNCZ/cLYrpvWJ4d8MZtpzKwWaHDpG+PDO6FQtu64HJasNoshAb6MkTI1pyKGWPUyinRq3ixdHxJLgVERZnotSci09oFPllVfySmI3dDkPjfXmxnwehx95Dc/yoTIUb/oq0QMX0EdfN5ROu2o6HlQ84vraapdVp3Kcy7encJnGytZ8MnoGS5QIQ1h5GvQEFydKmeGQhTJwvN+MFFyRMuCECm0loclALuRbseBtCWkorIUg7YuJP4tKxVIib79wGOd9CWsl14vgSyaho0g7ObbnyD+Ty83rzK3D9FyLCuHLf1VCY7HAEHZjtyLvZ/5WIrbF9HbkOpjL48a5qEewXh8DT/kbnfbabIJ/XlTA18BnnnCDPIGkt6zAF9n8t79FhCkR2B1/X2UwKCn9nTBYbiw803LL9zPLjLL+nF1O6x7D1dA4qFQxsEUKIjwE/jyv/jTWozCLY6t3lOhnWTv6OewTKPcXhBVCWXZ0d1ghbfyyVZFt9cFdbcdf+ycevUosrJ+lncWp7uVhoA6b3jOFMdgl3LzjIz/f3vaKDSkFBQeGvinLla8RYbPba8cWpBUYeGBLPQ4uPAJBXZqLSbKVLjD8HUwoJ83UjvdDIa+PbcTilkM2nc3DXabimQzjueg1hfm48/sMxp9Wr5HwjD35/hDduaMfcXcmUVFj414T2hPu5czy9mPl7HCtZ53PLWXowDY1axX0Dm/PKmiQMxecbPviKQilG5oxxPFaaKTk6I9+QVgzvcPh+sgge2950FnFAwk93fQJdb5Ug1Rrsdik8c0/JdJ6UnTD/Oue8jw5TJIvm8z5SSA18St6jptDLPSVTqtJdiDEgxeuml6S4cvOV4qowRYrQPg9LFk7yr+IMurjVsV+dO6jV0uZSnCbjnoNbyXHufN855M9slOK2hpSdEr56pbwMhauCgvIqKs02Qn3cGNcxgpJKM6ezShndNoxb+sQy45t9TqPHj6YVc9M3e1l2T2/umn+AzOIqnhrVkqOphXwyrTObTuWw/2IBQV563h/TBN8jX6BZOJsIsxG8QrAOeJokv4F0jvYnvbCCV3uYCV0xSZxcba8V4UHnDrEDxM01eZGEBuefkwPwCpXx2TX5LjUUJsu51WyIuFcKL8l5UVkMo9+WsdcFF6GyTAqWC1skN2frazDoWcmbMXiL485VW1TvB6X9YMgLsPxOef/e98H+b+S8jO0rLY4g5+LEBSKA7PpIzjmvEOg0Ha77Qtomo3s0/EMJblk/i8tULg4etUbauuq2N9bQcZq0TCatFGFm8HPyWdc9I+LP2ifENVT33LVZxfVUE4Dc91HwiXLer1+UuASX3iyTu0BaR4c8D1Hd6h+HZ5D8i+omAchaRdBV+Ptgs9nJLqmk0GhGq1bh76kn2NtwxdforjCiWqtRodWoSQjz+LccHVWleehPrpSJfeV5shCkNYjAW5Ip2Vaj/yVCRGkmaNz/3/u+aqgsIcvuT4ChAXfjH014ZxHzE3+SdlgX6LVqHhzSgmd/OsHjS4/y6bTOSl6OgoLC3w5FyGnEqFUqfN11FFeY6RDlh9lq443x7XhnwxlyS6t4Zc1JPp/WlQt5ZdjtcDanjA6RfsQEevDwsHiCvdw4fKmQY6lFtGriTYcoPy4VGCkod14h/mTLOZ4d3RqtRsUX2y9wa59YFu6tP/5x6YFUlt7di6SsEp4d3Qp7nuv+c0DGEeeedn7s0m6Yf70UlbdtkIwNnbusdm1pwJ4c0bnhzA9jvjiCVj9S/7kz66DFSBF39n0pq2lTl8hUHLVWCsjoXjKG+fJ8j07TRehpd6O8x6aXHM+lH5Jg15FvyvGHtpHsjJ8fkGK6xz1groJVDzuKNxCXzQ1fi3iTVp2dEZxQv/A8NFdyS9y8HZ+xJFPcRnovcR95hYHeuQ1P4c+hwmwlMb2YF1YmkphRgl6j5okRCXSPC+CDTWdZfk8vfj2X5yTi1GC22vl48zkeH96Slccy6N0skABPPauOZlBcaWZwqxD6Rmjw2vQkmrN1WvrKctCsfphWw14ltNvNzOzgjt9P0+H6z2QiU/YJx7bBLcWptvllcbuEtpXfS1OZiBieQVKc1MVul5vqdjfCwdkiYqx+WNqUet4D5kr5Pa8slKBvkN/h4JYwbSkcnA/Xfiy5NzVTnTyDJR8r77RcB/ReIgoFNBXnz03Lq8+POjfmvWbJ9kumO0TZwmT5LBmHHe1eve6D3R87fwaNTlozN/6z/g9NpYLT6yQT6NgS5+c6ThXX0FeDHI8dmiufd/yX8P0UOWctlTLNzidCzsnjS+XcbXM9DHtRAsovb4fU6OUac+c2+axWswhTXqHObVWXo9Yq8yYV/laUVZrZdiaP51ecIL/6/qRZsCfvT+pI63BfNOr6Bbxeq+amHjGsO5FV7zkQZ2Sg15WFIFfoK/NRbX0NbvxWWqL3fA6H5jg2uLRb/
o37FEJaYjf4NL4o8spisu0B+Ov/R1MytXqZVnhmvQhmDbSSh/u5c8+AZry78QxfbL/A3QOa/ckHqqCgoPC/RbkdbISUVZlJziunrMrC7Ju7MqV7FHqNGn8PPcsOpvHkyAS+vaUbc2d2Z+f5PH45mUWvZoGYLDZmztnPE8uO8eyPiXy78yItw3worjTz0PdHuJhbxkvXtuHWPrFO72e3g7e7ljvnH+RAcgE5pVXYXPx9r7LYMFZZ0KrVTPh8N4Xe8VLYuKL/41IU6r0k42HI8zL5xSNA/qXsEvGm3Y0NZ2KAtGTUHdVbl2aDXbcsgezTXme16ej3MunmaHV2zbY3JED01l9kRd4/VnJzJswW0ccOtLkO9n7hev/b3pDMn90fw5rHYez70HyYFHa/POMs4tQcz4r7ZCpOTetU30fgwLfy/yqVrO43Heh4TVmO5Hp83kcm/vx4F3zcVVYCqy5zVij8KZzOKmXiF7tJzJCWQpPVxrsbz2C3Q5smPqhVsKuBgE2AAymFtI3woWmQJ7fNOcDLP59ErVZx/+B4OkX60da3Cn1dEacOmu1v4ld6Fh9roeTdbHvLWcQBESB/ukdujs9thhPLYOkM2dYnXNxkrvCPlQtBt7tE1LHbxc1yfqs44tY9LSHgdduoDi+AojTodqsIN6PfgXv2wMy10npVnCnn3TUfi4DUaiyggpZjpRBa/ajzuR/aVkQbV5O0Tq0SIWjb63L9mDAbonrIcbebAFOWwME50upVF4OPCCOVReKaienreE6tgbY3OAu1NWQnSp5XwmiZZuUTKYLUzvfFDdjvURFtu9wiI90byrRSq8E3AsI7itPGP+bKIo6Cwt+QU1mlzFp0qFbEAXEBT/pyD+mF9Sdt1tAyzJsRbULrPd4s2IvxnSNdCkBXoshokuue1QQrHwZzubOIU5etr2PXe6H28P233uOqoEocOf5u/0MJKrq3XB9PLLviZt2aBnBdx3DeWneKHWf//wM+FBQUFP4KKI6cRkZWcSWvrUli1bEMbHbQqlVc2zGccD830gsr2J9SyP6UQka2DcPboCXY28Cd/ZtRVmXhs23nmdknlv7xwZRUmvHQa5n2zV6s1arM+dxytp/N45FhLRjZNqx2JWt6zxheXZ2E1WbHbLXj5dbwr42fp573Np4lzNfAxUILUVOWoFn9IKoal4neC/o+BE06wvktIugcXiDCjV8MjHlX2gs2viCFkWeQFEEqlevcmspiWcW+PIxVrZEVnZSdrg/UVC7vY/CWiTMgDoIWI6WwK8mQqTNbX5dQ5PFfQ+EFyQEx+MpUHlN5/ZDiGioKQV093rQoRTI82t0oq/Q134vLqSoRh8E1H0r71NkN0voS2U1avy5sE+dNZZGIX/nnJDi6LjYr/HQX3LtXHD0KfxpFRhOvrj5ZK3Jq1Comd4tiYEIIJouND6d0JLXQSKhPw6vA/h46yk1Wvt2ZDEh+1bDWoby6Jom0QiM/Dy1u+ACqStDZTWBXyxSmhlxsmUdg7LsyrttsFCeab4z8XjYbJNOjfn3PkaET3Quu/VCcadmJIrR0nFrd2hQqgceXdsm2HoEyTe7idnGyYJe2xl6zJKw384g44ExlIoLcul7cLLNHyLnc814ReI//IL+/Bm/J9qkJQc891fDnzzwiYs6mlyRMfeJ8cQCZyiXcvMYNVINKJdlbBm8RbNL2Qp8HYNDTkm8R0ExaqRri+BK4/ksp5uaMdghMpVnizOv3mLRguv3/2zYUFBScKa4w86/1p10+ZzRZWXsii7sacGIEeRt45bq2TOkezdzdyVSabdzYJZJecYE08fv3W54M5mK5xnWaASPfgsQfrnDgqQ1P67vaqSwiyx5AF/f/YYi63kOE9VOrpH3Nu0mDm97YJYqLeeXc/91hVj/Qj4j/4GeroKCg0BhRhJxGRLHRxDM/HmPzKceqg8VmZ/mhdOx2uLN/XO3jqQVG3prQng82nqV5sBc/Hkrn+bGtOZlZwsw5+3njhna8sfZUrYhTl483n+OTaZ1rhZx2kb68vlYKKJPVRrifO/4eOgqNznkS0QEeRGmKeL9XBQF+vuh3/BPV5oPitOn7iIgeei8psrIT5Y/zd1McBVD+OVnNHvmmTJ/KPS2Bqnf9KiGnrlZmyvJkJX390479xA+D7neJeBJVbc91ReoeKcR+vEt62stzZUSw1SyFXfOhUJIGqKU1Y/mdMkErqodMrYnofOUfmLqO4e34UhGEglte+TXGfAiMFxGp2SApkPs+DIunOwrrcxslV+TaT6SdJG2/OA8sVVJU2+3idBj6wpXfS+F3xWiyciBFxodr1CrendiBbWdyuXvBQca0a4Kvu45Dlwr55zVtGpwed2OXKCrNVtpH+jK2fRgdowKY+tUe7uwfx+19mqLWnLnyQRh8JFOm5egrb1ecLk4cjyAYVf27n39Wxt3H9oXxX4nA4REAem/YXZ1JU4PWTbKsglqIWNpssPyrKBQnSuebATucWA63bZRzat1TztOy8s7A4XkiuGgNkqPTbAgsuQmu+0z2telF+b2OHw5a94YFXZC2rprCqeiSnBfe4VBVDHnnYNoyET6zT4hI0+MuMBbJdWn/11BwHoIS5Frl30xavEoyGv4emsrFxbT0ZtcuoV/fFfeeIuQoKPzHVJisnMoqbfD5fckFzOzTFL3WtcE82NuNgQlu9IgLwGYDz/8wFNdWeAm3rOOojn0vbl5LpbR+Xwl145wmZ68oJgc/Atz/xyVCbD+Z4LnnUxj2EjTQpKZWq5g1qDn/+OkEsxYeYundva6Yj6SgoKDwV0ERchoReWUmJxGnLiuOpHNHv6b4eegoMpoZ1zGCDzadZVqPaM7llOGmVWO3w9IDEjjq664js7jS5b5MVhvFFSa8DFqiAtwJ9jKgVlHrACqtMPPq9e14ePERqixSwEQFuLN4vD+e84bjqXOXlfWz1QLKlssybK77TAqpulkXddn4vEx8SfxRCrM9n8kUF60bHFsshVfcQOj/RHUOThnMWCnvd2kPtJ0A302W7aYulhU042XtLGoNtBonIs/MtdLGkbrH8XzWMQkvvOErcdZkHgPfKBFx5o2FqUulqHT3l4LzcoLincNdNTppDTGXuz6eGvyi4ORP8nzBBWnJ+vmB+hOGbFYJWb3pBzAWSHGq95Tv68Fvxdmj8KeiVsl5VWQ0M75zBDvO5rH8kEyNG9UujIe+P0LzEC/yyqp4cEg8H2xyHlE/tFUIAV56vAxaXrq2DUlZJbyx9hQPDIknvaiC2+YdYPHkKHp4BtdvzQM5J5JWwrHvoe31Mv3D1fkF8vv5wDEoz5Gw7r1fyAQmkBXQU6vEmXbNhyJWJP7o/HpLpQgzt2+ChJFgMkpAMXaZtNbuRjnHvJvIaO7+j7seeW4sgMML4YbZcs4ZvCQfZ/PL4tKrIeu4CDXxw10Ls2qNBI3XbZ3S6GHne5L50+Z6ae/yCpNztiRDPnNUT1jzqOM1pVni3Ln2I3H4tBwt55MronuJAFV0yfXzdpsEnmv00jKloKDwb6PXqon0d6e4wkUQOdAixKtBEacu
7rr//HbXXnAR1ZrHUNVM0wMJew9pJdcAV86bqB6g8/qP3/N/SUFREWbC8PfQ/W8PRKuH1tfCwblwarW4QRvA203HA4Ob88+fT/L2L6d5elSrP/FAFRQUFP43KJJ1I6LA2HBWjM0O2SVVfDCpE+46DU2DPBnfKZLHfzjGusQsRrdvwqJ9joJD9Rvxe/4eer6+uQuvXd8OnUbNoJYhALjrNGQUV7LySDrL7unNC9e0ZlqPaJZMbUqTVdNlSkPCqPotP3U5OEdufsqyXT9vqQLsItwAhHeAolSZ7jRxHty8SsaJLxgvYtCKe2DuWDAWwnWfSoFZ0/K0/h8SStq0v2P/Ia0k8NVugxNLZcWnrohTQ1GKTE3Y+QFEdIJON0kxOvINCXj1iRAnj+aymx2DNwx7WQSoGjrNkKlW3hHVK0suaD9JplwZvGDws9K2ojXUz/WooapEitQlM2SM8rqn4fupEDeo2hGh8GcS7GWozZca1iqUFUfSa5+z2yVD6lK+EYvNTkZRBXNmduPhofHcM6AZX07vQpcYf/ZfLMDHTceD3x/Gx03PxbwyYgI9WLxfHDzPbi4g65oF4Obn/OaBzWDYK1CSLk63jMPiShv2kpwz134kY8dBboYrisX6b7fLuXJwtvP+Ot8MkxfKOZD0s0ysGvu+iIVxA8VFM+4TEYsWzxDBpPCiCIjb35JJVxq9ZEQFNHMt4tRw8ic57vXPwA+3yrleV8SpYdcHMOBJ1xb7YS8552GpNeAdJs6/smzY+7k4Z0LbyPlc45Db+IJ8Ly9fOf/lWRnHbrdLG+jlqLUyaeu3UKnhh5lwcYcciyvRV0FBoUECPPU8NDTe5XMatYoJXaJcPvd7kFNcib0wGdXFbc4izmPJsrhjt0s7uOqyW2mPQOyj3sLm20BG4FXOpTxxQAV7XgUlQnBLycvZ96Xcq12B5iHeTOwaxRfbLrDzXN4Vt1VQUFD4K6A4choRPm4iGKhVMKhlCANaBKNCxa/n8tiYlE2gp57Ptp3nqxldMFnspBSWk1NaRU5pFSHeBrLrOHDyyqqICnAntaCi3vsYtGq83bRkFVdi0GnQa1Tc3CuWE+nF5JRWEeHnztQeMdy94CC+7jrCfNxQl5Y6VqY1ejDX328tV3quBpVGRJmKQojoIoJFaQb8+o4IM8tuFVdKXY4skG39YhwFU/45WHa7FKbd7xTx5dIumV4TNxAmzJWwwoacNadWQ/c74Ohi6DRNsjr2fCZOgms/EjfB5O+k8Cy4AGFtJRdjy2sOV0x4J2jaD1J2i9Ay/ksJY93+NuSclMK0220iVB1ZBAOfhJQ9UHxJRple/4VkbuSekuK7rrBTVSpFZU0orM0C656UdrTCZLF+e9UPe1T4/dFo1EzqFs3u8/lYqvOkagj00tOveSCDW4US4edGfKgXTy47ir+HAb1GjZteTcswH3JKqvhyxwUeHZ5ApL87o9uGsea4Y+rK2Zxybl2v4vWxqwmvOkdQRQqqJh3E8WEqEyecRi8FRsF5+T3POSk5Ut3vghGviwNn6S2QWR0SPmmh87nUZaZsv2iS47ED30gr4U3VLrm80/LfwmQRcOriHwuj3hRR98x6cdHUFDo+ESIGFV0SVw+IiFLjHIrtK66iukR2lTwdtU6cQdN/hEt7ZdS5byQ0HQCJy+DkCsdrhr0irVij3pLw8ozDci0oyZB8n6zjciw3fAVlueAZKO1m29+Sa01lsUzl0ntJRk/SSmlXNJWKIDb4Ofke5STJVL3SLLn22MxyLTBXiAjrEy7FR0WBTMDrdb/kBXkE/Lu/XgoKf1u6xgTwyLB4Ptx0Dkt1O7iXQcuHUzoR6f/75qFYLRYs5YWoKvIJ0ruhKjgvwm8NsQPBUiZCsc0qbeAz11UL0hlyHWg2GAy+aDSN8xb7UqFcm0M9rpJ5Wy3HyP3Z5pcleyy2b4Objm3fhBPpxTy8+AjrH+qPv6f+TzxQBQUFhT+XxvlX5m9KkJeevs0Dua1vHFtO5/Dx5nPY7TC0dQiLbu+Bh0HD2hNZbEzK5sd7+/BTHUfA9jN5tI30Zfd5aen5dmcyT41sxcOLj2CyOrdfPDEygU+3nmfr6Vwi/d2ZO7MbH2xI5NOpndFo1JgsVkwWG7f3i+PLbedJK6zA1rZOm1bqXsm6aGiaVKtrpCjzjao/Xhuk0PMKFXFl7+fS9qRSyT6nVRdsl4s4Nez6ULIvMo84HvOJcAT/mo3gGy1ByO0ngzFXMntGvSnF7/6vxbEQ2KzaRlECqERY2vOZjB4GceHYbVJMXtzqyPkIbi3By55BcjOXMEoK50WTIbwzXPMe/HCLtI90rZ7mo3OXAOWkn8DNRyb9RHSClB0wu7oIbnO9TPbKTQL3ANj2phTooa3/r73zDo+i6uLwb3s2vVfSSEhoAQxIKEJEo0FAwYJ8dARFEFGKih0FBRRBFFQUQYo0KaICApEqRdBAqKGmECAhBNLrbvZ+f5zsJptsgIS0Jed92IfszJ2ZM7Nz27mnAP2X0gT3+GqSTQiyTrgRS+5dkZ8C3p0BtRlmzzAz3Gwt8NX/HsCt3CKDO+L4HoGws1CgTRN7LNoXhwKtDhEtXPHTiDAUarS4lafB/gs38PIKWm20UcnR0l4LDxsppj6Qi3Rhja6eHvhsbyq0OoF2HmpodEC6fQic3LwhkSlIKXhqPVBcSBZZD5dkkUo9Q4LlpNIgODWWYi71XUAZ2v75Fig72ZApgaBIck0sz9WjpJTw60Z1Rm5RMZ23wpJWqDeNJWs1oaMMVP2X0WA86xoNyF1bUNyaPTOAFk9R3Cf99Zs+CLQbCAgJpSO/sINk1RSQUggSwMGH7uPMJlIEWbmQXHbelCnqajQpTmw8qJ49MATY+gadK3QYxfH543Xg2tFS2V1bUsysjS+Ra6OFPbD0CapPzR4Hen1O95yXTi5aWVfIuqfvt/Sc4/ZQXW7/Aj0re28g9XTJ879O7cDfX5BSihU5DHPXOFgp8eJDTdGvXRMk3sqFSi6Fl70lXG1UUNyFW5VJdDoUFORBAxlUShWUBWnQpcZCemwFVEIHNIukbHI3L5HSWaYEXo0BoAE2TwQuRtE4IHIG4NCU4vjJVSWLKlJQakvzJCFLwF6aD0uFZX2LQkilpIA/uZ4SUARFUjuvsq1YVCLBmPAATNlwAu/8ehLfDQ6FRNJAFFIMwzA1jESIyiJHMqbIysqCnZ0dMjMzYWtbsROpbRLScjF0yeEKljRutiosH9kRPb/6G05WSiwe/iA2n0jGor/JesPJSokZz4RgzM/RhlihXQKcMCY8ANtOJSM2ORtNHNR4qp0ndpy5jvXRV9C9mQu6NXNGoKs1vOwtsOxgItb8m2RYEQtwscaHfVrgoz9OY1kfO/is6VEq0KC1wOZJ5DJRFhsPYPB6ivGRnw78OpoCoeqRSGgV/exWUsZ0m0yr3YcW0P7Q4RRn5mQl2SLkKnL52PAifffvDnSdQCviCjWg05HftZ0vTfTKymfnTdY1Ue/TpEymAB75kKxpMpJKAhwWUhDTG+docvr7q8Yv13MAAEfiSURBVLQy328
hWeHE7ab4OXpXEpfmgLULKaysXCi4bOzvwLmtNNGFjLJhbX+39P4Hr6fAyuXj6Ni4A09+Rff2zCIaYCYfI/evjqNJDv3EutUzAERpbJPnltBKYnEhkJkIFOaRksqjDQ2GhI6sebKTgStHKPV6kwfp95I3zBWt+q6LpkjPLcKmmKu4kVWIuLRcHIxLw+oXO2HyuuNGATsHh/kgooUbohNvITNfi3be9tDodJgXdQHLn/NEwN8TIUs6ZChf7NsNVx+eC5XKAg6nlkB55FtAYQUxchskq56vGBNJoSZLm7VDKsZXGvQLsPFlSgse0p/crG7EksVXRiIpOk+tp2DfMiW9DworoPsbVA9yU8nqxi2ErL+unyJrMs/2QMgzFKfq8kGgWU+ybLF0AJyCqb5pckvl8A8HOo2la6weQBY0/ZeSAti/e4kVj4TiW6bGUuBjvZJ00DogP4MsZm6ep2xVVi5kEZNymrJUOfgBycdJydX6WVKkZFymv6OXAgl/V/wB3dsAbZ4nBVfkLAoIbYrnV1Cg51tx5FaWdMR4f6dXSPlzbAUFaR/wM7mNFRfRhOTJBYDMPAOhmqIh1kWmkSME9WeafFqI0eRSe2RhB53SCrqz2yG/sBWFtv6QdnkV8kNfQyJXASHPUoB3a3dSIOizWhZlU78vkVI/fWhBab239aQ+t8WTwB+vQzw+HcV+PSBX1H2MmZqoi5M+nY3TRe746PEmNSzdPSIEPfPz20iB37IvxdBRVgwofzjuJubtvIDPn2uD5zvUnvsdwzBMfXJfKHK++eYbzJ49GykpKWjbti3mz5+Pjh07Vlp+3bp1+OCDD5CQkIBmzZrhs88+Q69ed8jyUkJ9DliFEFjxTyI+/O20yf0THm0GBysFHK1U2H46GWMfDkSf+fsNipun2nrisZZumPXnWVzNIEVQB18HTO/bCqnZhfjj+DUIAD1bucPHyRK/xVzD78evQacT6NPGE10CnTBlwwlczyoN7OdircLUJ1viVloKhlz7FNJLJSvrtl6UsvjsFlJcCEGDnDb/IyWGWyv6P3QYWdikxtIqf8hzpJg4sbb0xp6aDxycT1luXFsAD44Gtkw0/ZCaPEiBVVc9TwOuF7aRS9afU0pj8iitgPC3SYlzeKHx8W6tKdNM1IfAsz+SSXXCAaD9cLLiEYImiP98Ry4lbQdR9ppzWymmxu4ZFN/mz7doUvr3HONJnq0nTeo2vEhKocHrgIVdSzPxBEYAToEV5dIT/jaQsA+4FQ88u5jSHuvpNRu4FkOZrJybAZnXSJHj3orcP5yakSVUZhI9b6cAwMEfuH6GrAta9ClJ1ZxOE3elFSl4inJoEqxQk0uZVEZWQdYupmWsIxri5HHLiWsYt+oYVHIpfh4Vhoup2ZBKJZiy4aShzJAwHzhYKbHuvyuY8JAzejV3gLW8GBJtAQAJsHkiJPp03mUQAY9AEjqcfg/PUPp9Lv4FbJlkWpiWfel3O7qcvgdG0Ltt24R+4/R4qodHV5BFiXMwvbupZ2gFOvYPUgI1e5zcidYMJEWIjTtZnfiFA5b2NDlKPgHYetA5Lh8m6y+ZkrKr5d2kwJ/BvUgZk3y8VMaHJtFk6/jqUqs4hSWV0ysyvcMoJbpUTnFucm9Qu+HcnFwZfx1Nk67ITylmVt6t0vMHRgAPDAU2jAIGrqZ9zoHkRiUEtRFnN5PiSm/lN+gXUuLueJ/i2piiy3gg8DHKjLVruukyw/8Afh9PyqbQYdQmAPQ8+31HCik9Oh09h7w0ksvKme7JTFwz7rkuFmuBnBRS6ivU9I7dK8XFJecsIksq28rTFzc48m5CV5AFrU6CHLkdoLCEo5WqZq+RnUrKDZmClKByE+cv1kKXnQKtphAFQokMmSOsLeSVy6ItgtCn3ZYpqf3JuwkhkaJI5QiJtgCK4jxIpFJIZEqgKA9CpoBOroZUpwV0GkiKi6iuy5S0eFJcBKGyp3ZPIqU6oSmgsnI1yV+YDUBA6Cf0mnxAKoNEaEkWuZralaIcOqdMCUiVgK6IXCklErICLi4iazqFGrDzon6+xVPURpR1r1LZUMywo8uA+H2l2wesBP58E2LYH5A4B9bQD1U1aqJffPqD72CtVuKV8KZ3LlwfFGYDcXtpkUGmoAWJlv3ody3D93sv4XD8LWx+7SEEuJhn4GmGYZjbYR6jxNuwdu1aTJo0CQsXLkRYWBjmzZuHyMhInDt3Dq6urhXKHzx4EAMHDsTMmTPRp08frFq1Cv369cPRo0fRunXreriDuyerQItNxypPh7v1VDI+6N0SQ5eQ4qCDryPmDWiHN9Ydh6ZYoIWHDa5m5OO7IaG4mpEPmUSC2JRsTN8Si05NHRHZ2gM7zqQAEuCl5dG4fKt0Jf+Hv+Ow5WQyZjwdglHL/jNsv5FTiJwiLX74LwM9BnyGJtZzIDm1jpQk60YCPWeQ8ib1DFmp7PsCcPQjF6YHhlL68XZDaaIWsxJYP6qiBcHBr8mMdvt7pIDw7UwTnVwTwew6j6PJV7/vgKvHaIC2cXRpDBmAlBVRH5BVi62ncYrh66dIwRHci1xCrkYD/X8CDn1LK3D6SVaP92nwe34bWQ1tm0IrQy2eIkVJu8EUP6f8Sn3WNWDNYIr/o7Ci48vqUpt0MI71UZ6LUeRCkniQrIzKZiY6sogCMetdQyydgK6vk1Jt9UB6Fk0foWd05HuKGWLrRRZAXg+QlcI/39DzAUgh1PtLYOd04Fo0nUuTT3FE/MNL0sTfIZ16IyI1uwCzt58DQIGNM/KL0D3IBR/9ccZQRimT4uHmrlix/zz+GOAIZ1s1JDmJVB9ifydlgwklDgBILu0it8F1w8k1Z/TeUpckU8Tvo4C8R5eTkiT3BrDlDaAgA/B9CGg/DFjRr/T9Sz1DysrkmFI3PYCu4dyMsrTsngH0eBfYP4/e9d9eBa6WtgdQ2QAjttIEp2y68pSTpJx9bgnw65hSJc1/i8kC78wmwCmI3ukNo4zvI+kwKWue/BroNomCep/bRi5Tix+nut3rC3rHC7OMj734Fyleg3tRLBzXFjQhi/6pNI5NmwHUFmwcTVZpchUACSmOK0NpTQqsYysqLxOzijJ4+YSVWggCZG1UkFGqyNHkU33+9eXSbGRqB1JgB/Sga93PZKeUtD3f0jO19wEe/YjuvbouaDmp9N4fWkBKS1svck1t9njDdmvTFADXT0H8+RakV6OhlMphFdwPNzq+iSsKdwS52cBCcY+WXAWZ1Ldtf5esOhWW1L92GU/9oZ7s69D9twTSw99BWZAJpa0XtJ3exgFpKFoH+sHfudx7mZEEcWItJP98S/XbypliQnm1hyR+L1Q2HpD8PYfGBiobWgTx6QTJjg8ge24JuT0e+Z4m6dauZNVm6wW4h0ByfBUphex9qH8/tIDqikJN44uAR4C/PoTkyfm0L3QYJVw4vYnqtNIaCBtDCxoSKb0TSmtg84QSy9S2FEh942gq0/Z/5ALt3YmsFcsqcQCScdNYCghfVpGTcx3iqQXQWbnAnO3tErUOeMSi8pTv9Y7KhhaemoaTQufYCuDSHqDH22
RRWsLwLn44n5qDV34+ik3jukKtNOdfhWEYpiINICT9vTF37ly89NJLeOGFF9CyZUssXLgQlpaWWLJkicnyX331FXr27Ik333wTLVq0wPTp0xEaGooFCxbUseRVRyaVQKWo/CdTK2T4N/GWoayvkxU2HbuGhUPa47shoXCztcCsP88i6VY+xv58FKNXROPLqPM4ejkdjzR3w4JdF3Dheg7OJGcbKXH0XM3IR3RiOsL8jQfCiWl5sLdU4PHFF3Gz8zsQQzdRppyR22gldMnjNGCK/R24fIBWya/8CyTuJyWJpT1N5E6uq6jEAYC0C+T2BFAgwYQD5MrkHVZaxtaLJj4XdtBq/u4ZpFiJ/d1YiVOWfxfRYLI82iKKSROzirJPbXkDuLSzdMKbm0b3Y+9LFgr5GbT9zymk2GjSkSYhZzebvm52Mg3Y935W8X41BbefuKlsaKAJ0LFlM2alJ9CKvn6SnHeTLIsuHyLFkhB0HxtfpCxEQInCbTilj973eakSB6DJ5ZqBpIzLTye3LQtb+g3i9lCmsJuXKpe1kaHR6pBws/T3dLJS4XDcTajKxHB40N8BR+JvYWa4FVzUEkhOb6RsHGc2kULuToHA9Wlu824Bcftowl8ZFva04t6yL8VK2vcFKRAAoMML9L6WVSJa2NMEqqwSR0/aBeDSbnpv1o+kcx782liJA5D82SnGShw9hdnAga9pkqWnIJOuaeNB7/LeWabvJTuFFKSuLem7XzfgTEndtvUia7vyShw9x1cDrfoBLkH0rP/5tvQ5awtpwn/md5roATSxlSmpDTCFREKBn/X3VBkFmWTxtH5UaSB1e19SLJXNvpKeQFm+yqaUz08n66L7vX7lpVOWwT0z6XkB1KZuGFny+1YSC+125GcAUVPJUkr/3LOukqLsxFpjV96Gxs0LwJJISPTvh04LVex6NNn0HLTpV3D+eg1MrhP2UyBz/bulyaM6sX4kKcAAID8dYvs7kO6dVfq7ZF2F447x6F64D19HnUVyRpm2KucGxIGvINk1vbT/yU2jrHDntwEeoZBsKeNqXZhNSpvjq4DeX5Dy5e8vSutTTir1NzfOkdLYJ4zaqqvRdE59XdHkk1L2vyUU3+rXl8kq95/vyP1an72yKIfOn3ICOLed+tCNo0rbjIcmApteoXIdX6L3Rwig7UDgv59MP0e99U6ZrHbCtSXg2hIyM45Hl5WdjVvCBm4NJDzObdErdDq9SlakmycZxUe0UMgw4dFmSLiZi7fWH8d94IDAMAxjhFkrcoqKihAdHY2IiAjDNqlUioiICBw6dMjkMYcOHTIqDwCRkZGVlm9IWKvkeKGLX6X7B3fyxbZTlOGmo78j9l9Mw+5zqRi17D/cyinCD/soXk5mfhG87EszPcilUqTnFeH4lUyE+Ttiz9nUSq+x59wNhDU1VuT4OFkiM0+DAQ96I1vuiDVJjkiybIni2C00aBy4mlwxWjxFrhuWzrSafuIXWtFX2VC8iUpv3LV0MGnXhBQTm8bQyvboPaQ0evht4N/FpHwBaNJ3NZoGgpVx8yKZT5dFKqMVeZmCJnSiuGJWHj0H5lFsGisnmtjptBSYuSADgKTUUsYUGYk0KfV/2Hj72T/ITLgy2vyP/MElUprE6yf2AFkb6DOHleXoMnJr05N3iyyF9IqwtPOAJsf09W7F0Tn1g9WDCyiVOkCD6Ys7K5e1kSGXSeFua2H4rtXpMH1rLAY8WOqfr1bI0NpZBpf4X2kC79mOfnM9dwouWdb9Ydc0cvmrjLYDaUW69bMVFStyVcUsbf7daNJVGTkpwIUomuz4dqbYL+VxbQFc/bfycyT8TW5helyCaULUcxagtieFUWWknAIgobrZok9pIGFLJ2OruvIU5ZJLoUwFnPzFdJnY36g9CYoELmwHjvwIdH+LlC7leeRDUgio7SnzXWW07Atc+gsozKTn3fZ/QJ+5FHTZomSipy0CDn9vOni7EOSaWVa5er+Rm1oax6s8Oz8CcpKrcc4bpCAwxe4ZpBRsiBRkATunlSofypKZBO+cE/jpQDwy8ytZmLgbslPIos0Ulw+VJh/IuQGJqfoNwO7QLAxppcT51DJKpfxbkESbXjzDke8rjwd18S+yAiqfqU7P4YVA815A8kmKg/XPt6bL6fup7GRSnpqKfwXQ8Q++ABz6plSJbWFHCiG9kluqKP1bbV/qkm2KrKvU/wPU9tl6QmJOLnwmOHMpAQDgY2dGBvu2HkCnMbTg99dHRok2vB0tMTY8AH+cSDZYzDIMw9wvmLUiJy0tDcXFxXBzM06v7ObmhpQU04O1lJSUKpUvLCxEVlaW0ac+ecDHHhEtKrqMdfR3xAPe9jh/nSbkdmoFbmSXTvIVcimuptMK2uojSXi5jO+zj6Ml/okjSx6tTkB5mywQSrkU2jJplV2sVWjv64DvhrTHI81dcepKJj7ekYgxv6cgU6ei1bJ1I2jAI1PQ6vfiCFLODPud4kwkHgB8Opv20QdI6ROzipQszfvQgCk3jbLyXDtGK7q/jzfOVAVQ4GG99YopnEpiZZSl7UCazBZmU9rw62dMHwuQMsY9BNj6JqUo1mdQ2P4erbwrrSo/1vMBigOhtKL4I3puXqLn0Oyxise0eIriA+TfKpFzu/H+TmMpW1V5ijUVrZKSjlA8ID2FORQQ0hTXT5XKmJ1Mg1s98XtpMloHNLS6WB5XGxXGP1oaFyE9T4PMPA2upuejf3sKGnn6WhZC3WVQWNiQSb6mwNgq5up/lSsHgiLJGk1PznVA7USufeXx7Qq06E2WNyrbO1v6ADSBqcx6DSClht7ty9Q7VR26TqD6sup5msCWjRtTHrsm1IY4+JFVmGMAbc9MKs1KZwobd3LJyLpaebY7IUjx+vinpOBsFkHKnEenkhtkSH9yaxu0ltK6K61JUfzAENP13CmA2jiVLfDcT5TZSldM7l8FWaWKa00uWQlURuoZspZrYNRYXbydoj0/vVSBXxUqU7wDZIGhn6Q3NAqzaeGjEpySdkCrE8grrIaVUtlrZCRWvv9KiYXdzYuVlynIhIM0H6eulvnNc1Irr1vFGrKWMIXKluK9VUZRyQLDpZ2Ayrry98E5kCxI1fbUR1WGJp/kLHt/SmvqU02Rnkjjjcpwaw3cjIPwD4cYsBISB9/Ky9YSNd0vnoq7ChWK4OVkXzMC1hVyC3LXd/QHdn1C1m0lhDV1wuAwH3y75xLm77zNYgHDMIyZYdaKnLpg5syZsLOzM3y8ves3+r2LjQVmPtMGq14KQ582HniitTtmP9cG/dp5olBTDGsVraJcuJ6DNk1KzXsT0nIR7E6BAE9ezcSV9Hx82KclXGxUyC7UwMmKgsTtOpuKXiGVryj1aeOBv2JphSrAxQpfD2yHTceScCOnEG+tP4G03CI4W6sQm5yFbK/udJC2kFyeTm0oHbgnn6BtXqE0eTq7GXj6e3LvKEu7wZROuU1/YOQOsh5xDym1XDheYu1jisuHaAJWLgCegS4TSuPRqGxoUvnAMFIaxawEgnrePkCmyobkSToMnNwADNtEcWScg2hQ2Xm86
eOahNFxO94j0+xuk+lj5ULKqjO/UQye/62miWLoMAqc7BVKE39IyHXrv8V0Pgs7oM+X9EzLZy/SU9YFC6AJc2HJoFgipbgRopKBuGMAcL0kWK/SynjF2Ma94rlriYZWF8sjkUjQs5U7XujqB6kEOHjpJnoEu+LdX0/i+Qe9seqlMHT0d4RMrqDf0dazovLy4AJy8QmMKHtiSmMfOpxcg/SobGhC0m4w8NIuCvIdNoaCYLfsCyx6FNg727T7lbagYqyQpMNAwKOV36BtE3KB0stkKiBtaiwpWSrDrxul/Lb3oQxsSUdKJ1WHvwe6vGb6OLmK4vdoCyl+ypV/SbElU9KEX6cl5Ykpwt8GFNa3d0MDSPGy7R1Kva5QAydWA7s/IVfJRz4AfLqQVVCLJ6n9+esjsqB4fkWJklVJE9MHX6K4U7be1OatG0Fxf06uAyDIgtC25N1VWFLGr8pwCgKUDc/Hocbq4p1+k8qU+7ejfB9S4ZwWt99fX0hlZF1WCYWW7pCA0itXG5mSAglXhnXJIlFZZb0JdDIlAl3LuAAr1JUXBirvIzT5d34HZAoK/A0JtTumyEunxZOiXArEXxkSCfV3LmViu+WmkrWPHp2m9B06upRiB5nC2g3wewii/1LgqfmQVNb+1DI13S+euJYNX8l1yNQNI4FAlZDJgbaDaSz118dG7qp92niif/smmBN1Hu9vOoki7W0sphmGYcwEs1bkODs7QyaT4fp1Y9PX69evw93ddNYLd3f3KpV/5513kJmZafgkJSXVjPD3gIuNCl0CnPHp063xUKAzvthxDu/+ego/7o/HrGdCIJEAl27koImDJVxtaCC8PvoKRnT1M5zjh31x2HwiGW9FBuPdJ1qge5AzVHIp4tNyYaGQoXNAxQFlR39HdA9ywbS+rbFoWHu8/mgzuNtZIKK5O4JcrTHvf+3Q0sMGL3T1g04AK04VIvOhDyregLUbuUJ5tiuJfSMh1x2ZijI6DVwLDFgBvLiTJo5bJpFJeE4qBTU8tQF45geKjZF0hCaUIc8ZX0NpTQFMT6yjia11GSsmhSUQOZMCuD67mCZWfeaRcuXQAuB/q2gy5hZClkKVDVLbDSp1L8krCTx84xxZByT9A9i40cRUv2IvkQIt+gKPfVR6jqj3KUaBjScQ8THQfzkFf932DvDbKzQQyU4Bfn+N3GSatAcu7qA4BEN+BUZupwmxlQspxkzhFEgxUsrSsl+pRU/r50pSPZtA7UCDXn1Ax7YDjV0hHhha+eC6hmmIdbE8TtYqvPJwADa+0gXtfewx6fEg2FsqMfbnaEAAYf4OSIcdtA7+pIi48h8pKvVo8ihWhXsIMPQ3Chz8wjbKOLZ+pPHKdpfXSKFxagOgtCWFTkh/Ui78+RaVTY8nxaB/uLGgh78HHptm/Lvr41c0ebDijakd6N1r/Sx9P/ELBQctj05LdaDTKxX3qWyAxz6md6/Hu+QKGV0m/sTx1RSMts3AcsfZAs8uof93fkzPSJ+a/JlF1J5ETQWemE3tg/59tLAnixqXYGD9cFqhrUzJ5NqSVvsvRtFzzU2j+FgDf6HrFWaRctejLbl5nFhDx2kLyMImuBfJMmgtKXOVNoBLM/rtnvmRLKMe/wQYe4hcuOQlE1u5Cuj8SuX1r/vk21v21RM1Vhcd/SpXvPh3JzfcqmLrVbllV5OOt1WW1CvWbvQuVEJa4HNo6WELZ5t7yF5l5Qy0etb0Prmq1O3R3qfSoNDFPl1xIFmC1p5lJvqWzsbKkLI4B5UsQJhAaUULEdZupvd7h1EmxrYDyFox4BHT5TISgYCHydqmKIes90wR+DhlmOw0trTOFWvI8lZ/7iOLqJ2SSMhCNvk4BYu3KpOl0aczxPA/ICxdoHMKqhdLHD013S+euCmBvzKDUq+bI3IlEDqU/t45HdCWWqM+E9oEL3VrijVHktD3m/04djm9kpMwDMOYB2affjwsLAwdO3bE/PnzAQA6nQ4+Pj549dVX8fbbb1coP2DAAOTl5eGPP0rjUnTp0gVt2rTBwoWVpHwuQ0NLeXwztxDxN3Lx8z+JyC0qxqsPB0Aul2LFoURkF2jxYjd/zPvrAvZduIH+7b3R1tsOc3ecx81ccoto5mqND/u0xK6z1xHq64jJvxyHRAJ82KclFHIpos5chxACj7V0g4VCBmdrpSFrhqOVEiq5FEVagWKdDgqZFJpiHeQyKWb9eRZ/nkrB611d8XyAFi5nV0CZn4rioF4o9n8EQqaA9O8voDj+M62QDl4HIZEAqWchUdnQhEmng9AVQVKUR4MKhSUEStOPSqCjAWJhNlk35N+i7DjWrjR4PLeNlB7NepKfvT4miNqBTNiPLqeMNf7dKebFzQuUpaJZSSacuH2Ad0dAaCtmxAmKBDq+TLFHQoeRDCd+AR6aQC5fcXuoXLdJpCjR5JNpuK6YrC7aDSbZDn1DljBNHwHaDSTlkcoGKC6k+DPHV9GA84GhlCp87WCaLLcdSNYN2cn0ObeVskhtGmOczcvKmQJDb55YGv+g22SybIj+ic7bcTSt1G5/1zhWgY0HKbViVlFg6IBH6V43jKT9T8wmS4R6SkPe0OpiWbIKNEjLLsSJKxkIdLXB/gs3EJ2YjqdDm8DLXg1vZTbsU49AKpVS9rL9c4DL/xiOF54PQPLI+5Tt6LHppDDYP5diwTgFAJ3GkSuQgy+w7gWaeA3aAOTfBHKvl2QuK3EXsLADBq6hAN1lXVnaDQE6vkjvYNp5UhY9OIrcjBL2UzwZTR4Q3Jviu5z4hZQwV/+jgKNDNtJEZ99npGSVyskSKHwKKf4snYFjy+l99A+n4MG5qRQ8tNMYuq4+iLiDPwXVdvCnVNwSCZB2jpQ3Nu5k4ZZwAOjyKq2623gAf75JCs3O4wEHH0AiJyVt/k26d6m8JO7OeYpdpbYnpcq2KcaxeBz8yeIt8QC1BTfjqKyVM01uc29SvcpPh67XXMC3M6SFWaXKJH1MLJU1tUdWLoBdmew/d6IojzL6/fZKqeuI0pri6QT3ovaggVPtuqgrJmXmin7Ggd/tfcj11tG/6sLodORmu+zJUtccgBQ8w/+o3HKrIZCdAvHbq5BcjDLanNFjJjajOx5/IBCuZeJwVYvMJGDl8+S2p0empDbCrxtNhHU6spxb3tf4Gdr7IOnJNci19Eawuw0keqWpTgeRchKSn58uDXYMUF0YvJ5+29/GUSwbPUorssI9vpYm3htHG7u96a32bsWTG6lvVwCCXJnLZpSzsAee/o4WJrzDKDhyzxkUf6+sm5VHW4rFlZ0CZF4jJeIfr5G8chW5QCb8TeOCoJ7Uxx5fQ+1QcG+IVv2oriosAJUtJPYNyyJUz730i5l5GrSdtgNj7A8jvFuPOx/QkMlKpgULrwdo4aCM63h8Wi5+2HcJCTfzEB7kgoEdvREe5MpZrRiGMTvMXpGzdu1aDB8+HN9//z06duyIefPm4ZdffsHZs2fh5uaGYcOGwcvLCzNnUpaegwcPIjw8HLNmzULv3r2xZs0azJgx467TjzfUyWOxTgedABQyWkXRFBejQKOD
hVyKtJwi5BZpUawDnK2VKNAWIz1XA7lMAmdrFZRyKfIKtRAQyCvSIToxHdczC9AlwBle9hYo0BajQFsMa5UCCqkEN7ILoVbKoZRLkVOoRXaBBk5WKjhZK2FvSW5Mt3KLcD2rAIfjb8FOLcdDTR1gqwQkCgsoZFJIJBLoCnOBvJsQumIUStVIhw2sZICVyILQFCBPagmZTAGlJhvFkCBdWEEiVUKplENSrIWD7iYkEKQI0WoACQAhIGQKSKRKMlUvLqSJsiYHyMsAUAzYeFHZwmwawOrdTIqLKeuT3u2lKIeUOxaOkNh5kL983k0Il+Y0wSzWlJiMSwFdISl/FFaA0EFSXESTUbkloNNAZF+jia1EQoEhi7XkXqErpEGqwpJkkFvSBF0iIeVVYR7JqrIlRZW2AJCrSfmVe4OUTJYutF1bSNZDaWdphdG5GeDehiZL5/6kAWjTHnTuvJs0WVQ7AroiKiNTAUXZ5J6ldqRBuCaf3KrcWtNg9+JOktW3M92PVf2tbjfUumgKnU4gt0gLuVQCmVQKQECbkwaVNhtSoaPfQyqld9LKFVqVA3RaDWRFmZCqLCGEBFIJ6F3XFNBvqckHEg8BrsHkeiBTlLqNaAuBrGuQ5N2AsPclix2plK5z4yxNkux9yOUIOlJSypSkDNGUBAeWSkvqF4DiIgiZHIC0pM7kA1eP0aRYbV86wVFa0SRQFFPZglulihegdLIuV5O82kJyZZBb0DmLNXQ+TSGd09KJFLfaIiovU5F82kIKoJqbRkonCztaURc6SIo1EEJXGrAcEjqmIJPeeaEjpW7mVcDBm951hZoG+rlpkEiltE2movajIIOOsXQqec5lXFOKi4G8G6VWQpYugKoaFjTFWgrsm5NKii1rN7Loq8wttIFxT3WxWEvWYEmHSYnm3YEspGyroAwrj66Yft+r/wI3LtBkzq11xeD2DZHcG9ClX4a4tAsauTUKfB5GltwRljb2cLa+B2ucsmSnkEtj4kF6Jj5dyY24rCtbyTPUXfkX4uZFaN1DoXEKRp7KBS42FqVKnDLlRXo8WbCkxtLzdmtFVrPaAlKSZiQB16LJDcq1OXD1OKC2o3KafFIM37oIiXMwtRlyC8qgZeMKpF+mPlKmJAVN2nnAwQ8Sp2bU3+WlQfh3o3qYcZkUPIXZQPY1skq1dKZ6mp9B/ZZUAUCULPBISvpfC+r3dcXUdsgtgOJCCKUtpFZ3cAFrINxLXfwt5ipeXxODBX4H4BRiIk6fuXHjLHB0BfUNXV8zUubodAIHLqVh2+kUxN3IhUImQZsm9mjbxB4tPGwQ7G6DZq42rNxhGKZBY/aKHABYsGABZs+ejZSUFLRr1w5ff/01wsIoI8/DDz8MPz8/LF261FB+3bp1eP/995GQkIBmzZrh888/R69eve7qWuY0eWSY+xmuiwzTMOC6yDANg3upi68uP4QzsafwaWgu4Nm2liSsY67FUHyyJh0oDqI+Y2AZkjPycfxKBs5dz0bizTwkZ5IroASU9aq5uw19PGzRytMWPo6WFRWZDMMw9cB9ocipS3jAyjANA66LDNMw4LrIMA2D6tbFAk0xOkz7Ez2L9+LZR7rcMeC1WXHjLHByPf3t351iM2oLyDItO4WsRpVWFOfJKxQFzq1xNUuDpFt59EnPR9KtPGTkawAAthZytPd1QKemTugS4IxWnraQSlmxwzBM3XOb9AUMwzAMwzAMw9zPrDp8GXka4CHrRMDiifoWp2ZxaU7WOAl/A1ejgYt/kYue2qHEjdyJXPvi9wJnNsFCZYMAv4cQ4NMZCGhJrsAAMvKKkHAzF5du5OJ8SjbmRp3HzD/Pwt5SgS4BTujU1AmhPg4IcrOBUm6mwaIZhjErWJHDMAzDMAzDMI2QW7lF+HbPRTwkPwM3T786y0RZp6isgeAn6FMZQpCFTnIMcPkwxRaUSCngvtoR9jIl2kllaCdXAk6O0Po1wXlpAE5lW+J0cjZ2nL4OrU5ALpXA29ESfk6W8LRXw8POAu52anjaWcDLQQ1Pe7UhniXDMMy9wIochmEYhmEYhmlkXMvIxysrj0JTkIf+kl1AkxfrW6T6QyKhoN+2HpS5LCeVUtvn3igJgl0EaEuC5addhDxvK1rqitFSaQW4t0aRbwskSH2QWGiDa/lS3MgtQHxaDm7lapBVoDVcRioB3Gwt4GWvhpu1HHZKHdRSHaQQKJYqoJWqoBESCAFIJBKo5FKolTJYq+SwUyvgaKWEg6USTtZKOFlRkhEZu3YxTKOEFTlVRB9SKCsr6w4lGYapDjY2NncVSJDrIsPULlwXGaZhUBt1cfLGWESdTQMAdJBcxH5pa+hOpAJIvSdZ7y9sSj7lkAGw1kFSmAVJfjoQrwPiLwC4AACwK/kElhTXSOW4KWyRBjvcEPZIznQwBFVmTCMBIJdJIJNIAPoHgBJpCgHohIC2WKCqgV4lAGRSCWRSCSQg/Z2XvQVmPBWM5m7Wdzz+busiw9QFHOy4ily5cgXe3t71LQbD3LfcbZBGrosMU7twXWSYhkFt1EW3QbNg4d0aolgLG0ke5FKyAmGqT20bxuggQQ7UKAanRa9p0nf/hKwjG+5YjoP6Mw0JVuRUEZ1Oh2vXrkEIAR8fHyQlJTX4Cp2VlQVvb2+zkBVgeWubhi7v3a526Oti+fIN/f5qm8Z8/3zvNXvv91oX64vG/B7UJPwca4aaeI71XRf5Xaga/Lyqhjk9r4bSzzEMwK5VVUYqlaJJkyYGs1VbW9sG3+joMSdZAZa3tjE3ecujr4uVYe73d6805vvne6/be79TXawvGvN7UJPwc6wZ6uI51nZd5HehavDzqhr8vBimanDYdIZhGIZhGIZhGIZhGDOBFTkMwzAMwzAMwzAMwzBmAityqolKpcLUqVOhUqnqW5Q7Yk6yAixvbWNu8laV+/3+7kRjvn++98Z57+XhZ1Ez8HOsGe6H53g/3ENdws+ravDzYpjqwcGOGYZhGIZhGIZhGIZhzAS2yGEYhmEYhmEYhmEYhjETWJHDMAzDMAzDMAzDMAxjJrAih2EYhmEYhmEYhmEYxkxgRQ7DMAzDMAzDMAzDMIyZwIqc2/DNN9/Az88PFhYWCAsLw5EjR25bft26dWjevDksLCwQEhKCrVu31pGkVZN10aJF6NatGxwcHODg4ICIiIg73ltNU9Vnq2fNmjWQSCTo169f7QpYjqrKm5GRgXHjxsHDwwMqlQpBQUEN9n0AgHnz5iE4OBhqtRre3t6YOHEiCgoK6kjaqmNOdbM2MLf6XpOYW9tRk5hbO1QXfPrpp+jSpQssLS1hb29vsszly5fRu3dvWFpawtXVFW+++Sa0Wq1RmT179iA0NBQqlQqBgYFYunRp7QvfgPHz84NEIjH6zJo1y6jMiRMn0K1bN1hYWMDb2xuff/55PUnbsKlum1XXNPZ+tao05n64OjTmvpthag3BmGTNmjVCqVSKJUuWiNOnT4uXXnpJ2Nvbi+vXr5ssf+DAASGTycTnn38uzpw5I95//32hUCjEyZMnG5ysgwYNEt988404duyYiI2NFSN
GjBB2dnbiypUrtS5rdeTVEx8fL7y8vES3bt1E375960RWIaoub2FhoejQoYPo1auX2L9/v4iPjxd79uwRMTExDVLelStXCpVKJVauXCni4+PF9u3bhYeHh5g4cWKdyFtVzKlu1gbmVt9rEnNrO2oSc2uH6ooPP/xQzJ07V0yaNEnY2dlV2K/VakXr1q1FRESEOHbsmNi6datwdnYW77zzjqFMXFycsLS0FJMmTRJnzpwR8+fPFzKZTGzbtq0O76Rh4evrK6ZNmyaSk5MNn5ycHMP+zMxM4ebmJgYPHixOnTolVq9eLdRqtfj+++/rUeqGR3XbrLqmsferVaUx98PVoTH33QxTm7AipxI6duwoxo0bZ/heXFwsPD09xcyZM02Wf/7550Xv3r2NtoWFhYmXX365VuUUouqylker1QobGxuxbNmy2hLRiOrIq9VqRZcuXcSPP/4ohg8fXqcNelXl/e6770TTpk1FUVFRXYloRFXlHTdunHjkkUeMtk2aNEl07dq1VuWsLuZUN2sDc6vvNYm5tR01ibm1Q3XNTz/9ZFKRs3XrViGVSkVKSoph23fffSdsbW1FYWGhEEKIt956S7Rq1crouAEDBojIyMhalbkh4+vrK7788stK93/77bfCwcHB8AyFEGLKlCkiODi4DqQzH+61va4rGnu/WlUacz9cHRpz380wtQm7VpmgqKgI0dHRiIiIMGyTSqWIiIjAoUOHTB5z6NAho/IAEBkZWWn5+pS1PHl5edBoNHB0dKwtMQ1UV95p06bB1dUVo0aNqnUZy1IdeX///Xd07twZ48aNg5ubG1q3bo0ZM2aguLi4QcrbpUsXREdHG8xc4+LisHXrVvTq1avW5a0q5lQ3awNzq+81ibm1HTWJubVDDYlDhw4hJCQEbm5uhm2RkZHIysrC6dOnDWXulzaiJpk1axacnJzwwAMPYPbs2UbuaIcOHUL37t2hVCoN2yIjI3Hu3Dmkp6fXh7gNjppor+uCxt6vVpXG3A9Xh8bcdzNMbSOvbwEaImlpaSguLjYa+AGAm5sbzp49a/KYlJQUk+VTUlJqTU6gerKWZ8qUKfD09KzQKdcG1ZF3//79WLx4MWJiYmpdvvJUR964uDjs2rULgwcPxtatW3Hx4kW88sor0Gg0mDp1aoOTd9CgQUhLS8NDDz0EIQS0Wi3GjBmDd999t1ZlrQ7mVDdrA3Or7zWJubUdNYm5tUMNicrqv37f7cpkZWUhPz8farW6boRtQLz22msIDQ2Fo6MjDh48iHfeeQfJycmYO3cuAHpm/v7+RseUfa4ODg51LnNDoyba67qgsferVaUx98PVoTH33QxT27BFTiNn1qxZWLNmDX799VdYWFjUtzgVyM7OxtChQ7Fo0SI4OzvXtzh3hU6ng6urK3744Qe0b98eAwYMwHvvvYeFCxfWt2gm2bNnD2bMmIFvv/0WR48excaNG7FlyxZMnz69vkVjapiGXt9rEnNsO2oSc2uHyvL2229XCLRb/tOQJsLmQlWe66RJk/Dwww+jTZs2GDNmDObMmYP58+ejsLCwnu+CYcybxtQPV4fG3nczTFVgixwTODs7QyaT4fr160bbr1+/Dnd3d5PHuLu7V6l8TVEdWfV88cUXmDVrFv766y+0adOmNsU0UFV5L126hISEBDz55JOGbTqdDgAgl8tx7tw5BAQENBh5AcDDwwMKhQIymcywrUWLFkhJSUFRUZGRKXpDkPeDDz7A0KFD8eKLLwIAQkJCkJubi9GjR+O9996DVNpw9L3mVDdrA3Or7zWJubUdNYm5tUP3yuTJkzFixIjblmnatOldncvd3b1CdhT9c9Q/u8raCFtb2/vKGudenmtYWBi0Wi0SEhIQHBxc6TMDYJZta21wL+11XdLY+9Wq0pj74erQmPtuhqltGs4MrQGhVCrRvn177Ny507BNp9Nh586d6Ny5s8ljOnfubFQeAKKioiotX5+yAsDnn3+O6dOnY9u2bejQoUOtyliWqsrbvHlznDx5EjExMYbPU089hR49eiAmJgbe3t4NSl4A6Nq1Ky5evGjoeADg/Pnz8PDwqPXJU3XkzcvLq6Cs0U/+hBC1J2w1MKe6WRuYW32vScyt7ahJzK0duldcXFzQvHnz237u9h46d+6MkydPIjU11bAtKioKtra2aNmypaHM/dJG3I57ea4xMTGQSqVwdXUFQM9s37590Gg0hjJRUVEIDg5mt6oSqtte1zWNvV+tKo25H64OjbnvZphap76jLTdU1qxZI1QqlVi6dKk4c+aMGD16tLC3tzdkvhg6dKh4++23DeUPHDgg5HK5+OKLL0RsbKyYOnVqnaYfr4qss2bNEkqlUqxfv94otWh2dnaty1odectT19Hrqyrv5cuXhY2NjXj11VfFuXPnxObNm4Wrq6v45JNPGqS8U6dOFTY2NmL16tUiLi5O7NixQwQEBIjnn3++TuStKuZUN2sDc6vvNYm5tR01ibm1Q3VFYmKiOHbsmPj444+FtbW1OHbsmDh27Jjh/danH3/88cdFTEyM2LZtm3BxcTGZfvzNN98UsbGx4ptvvmnU6ccPHjwovvzySxETEyMuXbokfv75Z+Hi4iKGDRtmKJORkSHc3NzE0KFDxalTp8SaNWuEpaUlpx8vx53qbUOhsferVaUx98PVoTH33QxTm7Ai5zbMnz9f+Pj4CKVSKTp27Cj++ecfw77w8HAxfPhwo/K//PKLCAoKEkqlUrRq1Ups2bKlQcrq6+srAFT4TJ06tUHKW576aNCrKu/BgwdFWFiYUKlUomnTpuLTTz8VWq22Qcqr0WjERx99JAICAoSFhYXw9vYWr7zyikhPT68zeauKOdXN2sDc6ntNYm5tR01ibu1QXTB8+HCT7/fu3bsNZRISEsQTTzwh1Gq1cHZ2FpMnTxYajcboPLt37xbt2rUTSqVSNG3aVPz00091eyMNiOjoaBEWFibs7OyEhYWFaNGihZgxY4YoKCgwKnf8+HHx0EMPCZVKJby8vMSsWbPqSeKGze3qbUOisferVaUx98PVoTH33QxTW0iEaGC+EwzDMAzDMAzDMAzDMIxJOEYOwzAMwzAMwzAMwzCMmcCKHIZhGIZhGIZhGIZhGDOBFTkMwzAMwzAMwzAMwzBmAityGIZhGIZhGIZhGIZhzARW5DAMwzAMwzAMwzAMw5gJrMhhGIZhGIZhGIZhGIYxE1iRwzAMwzAMwzAMwzAMYyawIoepcZYuXQp7e/sqHTNixAj069evVuRpSPj5+WHevHn1LQbD3Lc8/PDDmDBhQn2LwTBmy549eyCRSJCRkVHtc3A9ZJjq8dFHH6Fdu3b1LQbDMGYAK3KYu6YyZUv5Qd+AAQNw/vz5uhXuDtztwFRfTv9xcXFBr169cPLkySpdrzJl1r///ovRo0dX6VxM4+bQoUOQyWTo3bt3fYtSZ2zevBnh4eGwsbGBpaUlHnzwQSxdutSoTE1MNhmmrinbv5j6fPTRRxWOycvLwzvvvIOAgABYWFjAxcUF4eHh+O233wxlqrtIYErh0qVLFyQnJ8POzu
6Ox1dWDzdu3Ijp06dXWR6GaWiMGDHCUD+VSiUCAwMxbdo0aLVao3Jz5syBg4MDCgoKKpwjLy8Ptra2+Prrr+tKbIZhGgGsyGFqHLVaDVdX1/oW4544d+4ckpOTsX37dhQWFqJ3794oKiq65/O6uLjA0tKyBiRkGguLFy/G+PHjsW/fPly7dq1WryWEqDA4rWvmz5+Pvn37omvXrjh8+DBOnDiB//3vfxgzZgzeeOONepGpJuo+wwBAcnKy4TNv3jzY2toabTP1jo8ZMwYbN27E/PnzcfbsWWzbtg3PPfccbt68WSsyKpVKuLu7QyKRVPscjo6OsLGxqUGpGKb+6NmzJ5KTk3HhwgVMnjwZH330EWbPnm1UZujQocjNzcXGjRsrHL9+/XoUFRVhyJAhdSUywzCNAFbkMDWOKWuUTz75BK6urrCxscGLL76It99+26Tp6BdffAEPDw84OTlh3Lhx0Gg0hn2FhYV444034OXlBSsrK4SFhWHPnj2G/YmJiXjyySfh4OAAKysrtGrVClu3bkVCQgJ69OgBAHBwcIBEIsGIESNuew+urq5wd3dHaGgoJkyYgKSkJJw9e9awf+7cuQgJCYGVlRW8vb3xyiuvICcnBwCtUL7wwgvIzMyssMpaftX08uXL6Nu3L6ytrWFra4vnn38e169fv/NDZhoFOTk5WLt2LcaOHYvevXsbWaUMGjQIAwYMMCqv0Wjg7OyM5cuXAwB0Oh1mzpwJf39/qNVqtG3bFuvXrzeU16+m//nnn2jfvj1UKhX279+PS5cuoW/fvnBzc4O1tTUefPBB/PXXX0bXSk5ORu/evaFWq+Hv749Vq1ZVeL8zMjLw4osvwsXFBba2tnjkkUdw/PjxSu83KSkJkydPxoQJEzBjxgy0bNkSgYGBmDx5MmbPno05c+bg8OHDd6zTOp0Ob731FhwdHeHu7l7ByuFOculN23/88Uf4+/vDwsICAA3GQ0JCoFar4eTkhIiICOTm5lb+AzJMOdzd3Q0fOzs7SCQSo23W1tYVjvn999/x7rvvolevXvDz80P79u0xfvx4jBw5EgBZ1SQmJmLixImGPgcAbt68iYEDB8LLywuWlpYICQnB6tWrDecdMWIE9u7di6+++spwXEJCQgUrm+r0reUtfQoLCzFlyhR4e3tDpVIhMDAQixcvroUnzDA1j0qlgru7O3x9fTF27FhERETg999/Nyrj6uqKJ598EkuWLKlw/JIlS9CvXz84OjpiypQpCAoKgqWlJZo2bYoPPvjAaKxbHlNWc/369TPq8+40PmYY5v6EFTlMrbNy5Up8+umn+OyzzxAdHQ0fHx989913Fcrt3r0bly5dwu7du7Fs2TIsXbrUaOL66quv4tChQ1izZg1OnDiB/v37o2fPnrhw4QIAYNy4cSgsLMS+fftw8uRJfPbZZ7C2toa3tzc2bNgAoNTS5quvvror2TMzM7FmzRoAtEqpRyqV4uuvv8bp06exbNky7Nq1C2+99RYAMksvv9JqapVVp9Ohb9++uHXrFvbu3YuoqCjExcVVmJwzjZdffvkFzZs3R3BwMIYMGYIlS5ZACAEAGDx4MP744w+DAhEAtm/fjry8PDz99NMAgJkzZ2L58uVYuHAhTp8+jYkTJ2LIkCHYu3ev0XXefvttzJo1C7GxsWjTpg1ycnLQq1cv7Ny5E8eOHUPPnj3x5JNP4vLly4Zjhg0bhmvXrmHPnj3YsGEDfvjhB6Smphqdt3///khNTcWff/6J6OhohIaG4tFHH8WtW7dM3u/69euh0WhM1peXX34Z1tbWWL169R3r9LJly2BlZYXDhw/j888/x7Rp0xAVFVUluS5evIgNGzZg48aNiImJQXJyMgYOHIiRI0ciNjYWe/bswTPPPGP4PRimtnB3d8fWrVuRnZ1tcv/GjRvRpEkTTJs2zdDnAEBBQQHat2+PLVu24NSpUxg9ejSGDh2KI0eOAAC++uordO7cGS+99JLhOG9v7wrnr4m+ddiwYVi9ejW+/vprxMbG4vvvvzeptGIYc0CtVpu01Bw1ahR27dqFxMREw7a4uDjs27cPo0aNAgDY2Nhg6dKlOHPmDL766issWrQIX3755T3Jc6fxMcMw9ymCYe6S4cOHC5lMJqysrIw+FhYWAoBIT08XQgjx008/CTs7O8NxYWFhYty4cUbn6tq1q2jbtq3RuX19fYVWqzVs69+/vxgwYIAQQojExEQhk8nE1atXjc7z6KOPinfeeUcIIURISIj46KOPTMq+e/duIxkrQ19Of28ABADx1FNP3fa4devWCScnJ8P38s9Aj6+vr/jyyy+FEELs2LFDyGQycfnyZcP+06dPCwDiyJEjt70e0zjo0qWLmDdvnhBCCI1GI5ydncXu3buNvi9fvtxQfuDAgYY6U1BQICwtLcXBgweNzjlq1CgxcOBAIUTp+75p06Y7ytKqVSsxf/58IYQQsbGxAoD4999/DfsvXLggABje77///lvY2tqKgoICo/MEBASI77//3uQ1xowZY7Le6GnTpo144oknjGQvX6fDw8PFQw89ZLTtwQcfFFOmTLlruaZOnSoUCoVITU017I+OjhYAREJCQqXyMUxVqKyfKM/evXtFkyZNhEKhEB06dBATJkwQ+/fvNypTtm+5Hb179xaTJ082fA8PDxevv/66UZnydas6fWvZ8547d04AEFFRUXeUj2EaGsOHDxd9+/YVQgih0+lEVFSUUKlU4o033qhQVqvVCi8vLzF16lTDtg8++ED4+PiI4uJik+efPXu2aN++veH71KlTjcbHpupo3759xfDhw4UQdzc+Zhjm/oQtcpgq0aNHD8TExBh9fvzxx9sec+7cOXTs2NFoW/nvANCqVSvIZDLDdw8PD8MK/8mTJ1FcXIygoCBYW1sbPnv37sWlS5cAAK+99ho++eQTdO3aFVOnTsWJEyeqfZ9///03oqOjsXTpUgQFBWHhwoVG+//66y88+uij8PLygo2NDYYOHYqbN28iLy/vrq8RGxsLb29voxXQli1bwt7eHrGxsdWWnbk/OHfuHI4cOYKBAwcCAORyOQYMGGBwR5DL5Xj++eexcuVKAEBubi5+++03DB48GABZlOTl5eGxxx4zqjPLly831Bk9HTp0MPqek5ODN954Ay1atIC9vT2sra0RGxtrsMg5d+4c5HI5QkNDDccEBgbCwcHB8P348ePIycmBk5OT0fXj4+MrXL+madOmjdH3sm3J3crl6+sLFxcXw/e2bdvi0UcfRUhICPr3749FixYhPT29Vu+DaVxcvnzZ6J2cMWMGAKB79+6Ii4vDzp078dxzz+H06dPo1q3bHYMJFxcXY/r06QgJCYGjoyOsra2xfft2I8u6u+Fe+9aYmBjIZDKEh4dX6TiGaShs3rwZ1tbWsLCwwBNPPIEBAwagT58+RvV15cqVkMlkGD58OJYuXQohBHQ6HZYtW4YXXngBUilNudauXYuuXbsaXCnff//9KtfJstzN+JhhmPsTeX0LwJgXVlZWCAwMNNp25cqVGjm3QqEw+i6RSKDT6QDQxFImk
yE6OtpI2QPAYJ794osvIjIyElu2bMGOHTswc+ZMzJkzB+PHj6+yLP7+/rC3t0dwcDBSU1MxYMAA7Nu3DwCQkJCAPn36YOzYsfj000/h6OiI/fv3Y9SoUSgqKuJgxkyNsHjxYmi1Wnh6ehq2CSGgUqmwYMEC2NnZYfDgwQgPD0dqaiqioqKgVqvRs2dPADC4XG3ZsgVeXl5G51apVEbfraysjL6/8cYbiIqKwhdffIHAwECo1Wo899xzVQr6m5OTAw8PD5N++qYyugFAUFAQMjMzce3aNaP7Bijg8KVLlwwxOW7HndqSu5Gr/DORyWSIiorCwYMHsWPHDsyfPx/vvfceDh8+DH9//zvKxDB3wtPTEzExMYbvjo6Ohr8VCgW6deuGbt26YcqUKfjkk08wbdo0TJkyxcjttyyzZ8/GV199hXnz5hliuk2YMKHKwbvvtW9Vq9VVuh7DNDR69OiB7777DkqlEp6enpDL5cjPzzeqr25ubgCAkSNHYubMmdi1axd0Oh2SkpLwwgsvAKAslIMHD8bHH3+MyMhI2NnZYc2aNZgzZ06l15ZKpRVceMvG1Lmb8THDMPcnbJHD1DrBwcH4999/jbaV/34nHnjgARQXFyM1NRWBgYFGH3d3d0M5b29vQ4aPyZMnY9GiRQBK49sUFxdXWf5x48bh1KlT+PXXXwEA0dHR0Ol0mDNnDjp16oSgoKAK2YSUSuUdr9WiRQskJSUhKSnJsO3MmTPIyMhAy5Ytqywnc/+g1WqxfPlyzJkzx8j67fjx4/D09DQELO3SpQu8vb2xdu1arFy5Ev379zcoMVq2bAmVSoXLly9XqDOm4mCU5cCBAxgxYgSefvpphISEwN3dHQkJCYb9wcHB0Gq1OHbsmGHbxYsXjSxUQkNDkZKSArlcXuH6zs7OJq/77LPPQqFQmBzULly4ELm5uQYLperW6erIpUcikaBr1674+OOPcezYMSiVSkO7wDD3Svl3sqwipzwtW7aEVqs1pDo21eccOHAAffv2xZAhQ9C2bVs0bdoU58+fNypzN30VcG99a0hICHQ6XYXYXAxjLugXMX18fCCX0xq4Wq02qq/6LG0BAQEIDw/HkiVL8NNPPyEiIgK+vr4AgIMHD8LX1xfvvfceOnTogGbNmhnF0zGFi4uLIe4VQHXt1KlThu93Oz5mGOb+gxU5TK0zfvx4LF68GMuWLcOFCxfwySef4MSJE1VKbRoUFITBgwdj2LBh2LhxI+Lj43HkyBHMnDkTW7ZsAQBMmDAB27dvR3x8PI4ePYrdu3ejRYsWAMhNQiKRYPPmzbhx44ZRgNg7YWlpiZdeeglTp06FEAKBgYHQaDSYP38+4uLisGLFigquV35+fsjJycHOnTuRlpZm0uUqIiICISEhGDx4MI4ePYojR45g2LBhCA8Pr+DqwjQuNm/ejPT0dIwaNQqtW7c2+jz77LNG2V4GDRqEhQsXIioqyuBWBVBAxTfeeAMTJ07EsmXLcOnSJRw9ehTz58/HsmXLbnv9Zs2aGYL8Hj9+HIMGDTJYtABA8+bNERERgdGjR+PIkSM4duwYRo8eDbVabajXERER6Ny5M/r164cdO3YgISEBBw8exHvvvYf//vvP5HV9fHzw+eefY968eXjvvfdw9uxZXLp0CXPnzsVbb72FyZMnIywsDED163R15AKAw4cPY8aMGfjvv/9w+fJlbNy4ETdu3DC0MQxTWzz88MP4/vvvER0djYSEBGzduhXvvvsuevToAVtbWwDU5+zbtw9Xr15FWloaAKrHeiuy2NhYvPzyyxWyIvr5+RkywaWlpRnVcz332rf6+flh+PDhGDlyJDZt2oT4+Hjs2bMHv/zyS00/KoZpEIwaNQobN27Er7/+aghyDFCdvHz5MtasWYNLly7h66+/vuNiwCOPPIItW7Zgy5YtOHv2LMaOHWvIKAfc3fiYYZj7lPoN0cOYE2UDvpWlfLBDUwEcp02bJpydnYW1tbUYOXKkeO2110SnTp1ue+7XX39dhIeHG74XFRWJDz/8UPj5+QmFQiE8PDzE008/LU6cOCGEEOLVV18VAQEBQqVSCRcXFzF06FCRlpZmJIO7u7uQSCSGIHF3uhc9ly9fFnK5XKxdu1YIIcTcuXOFh4eHUKvVIjIyUixfvrzCcWPGjBFOTk4CgCHwXfmAlImJieKpp54SVlZWwsbGRvTv31+kpKSYlI1pPPTp00f06tXL5L7Dhw8LAOL48eNCCCHOnDkjAAhfX1+h0+mMyup0OjFv3jwRHBwsFAqFcHFxEZGRkWLv3r1CiMrf9/j4eNGjRw+hVquFt7e3WLBgQYWAi9euXRNPPPGEUKlUwtfXV6xatUq4urqKhQsXGspkZWWJ8ePHC09PT6FQKIS3t7cYPHiwUYBvU/z222+iW7duhmDq7du3F0uWLKlQzlSdvlNgyLuRq3ywSf1zjoyMFC4uLkKlUomgoCBD8GeGqQ53G+x4xowZonPnzsLR0VFYWFiIpk2bitdee82ofzt06JBo06aNUKlUQj+0u3nzpujbt6+wtrYWrq6u4v333xfDhg0z6mvPnTsnOnXqJNRqtQAg4uPjK7QL1elby9fD/Px8MXHiROHh4SGUSqUIDAw0WacZpqFR2dj3duTl5Qk7Ozvh6OhYIbD+m2++KZycnIS1tbUYMGCA+PLLL43agfL9T1FRkRg7dqxwdHQUrq6uYubMmRX6tDuNjxmGuT+RCMG5U5m657HHHoO7uztWrFhR36IwDFMDXLlyBd7e3oZA4AzDMAzDMAzD1A4c7JipdfLy8rBw4UJERkZCJpNh9erV+OuvvxAVFVXfojEMU0127dqFnJwchISEIDk5GW+99Rb8/PzQvXv3+haNYRiGYRiGYe5rWJHD1DoSiQRbt27Fp59+ioKCAgQHB2PDhg2IiIiob9EYhqkmGo0G7777LuLi4mBjY4MuXbpg5cqVFTJGMQzDMAzDMAxTs7BrFcMwDMMwDMMwDMMwjJnAWasYhmEYhmEYhmEYhmHMBFbkMAzDMAzDMAzDMAzDmAmsyGEYhmEYhmEYhmEYhjETWJHDMAzDMAzDMAzDMAxjJrAih2EYhmEYhmEYhmEYxkxgRQ7DMAzDMAzDMAzDMIyZwIochmEYhmEYhmEYhmEYM4EVOQzDMAzDMAzDMAzDMGYCK3IYhmEYhmEYhmEYhmHMhP8DYyj/MF+JgsQAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAwgAAAKqCAYAAACepnlGAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8g+/7EAAAACXBIWXMAAA9hAAAPYQGoP6dpAACKIElEQVR4nOzdeXhMd/vH8c8kskgiiSCJxJKgltiX2tValGrRKrpYq/TpKrRotbaWrrbSelpq6aMtRXWhVbVUtVpqp3YhtiCC7AnJ+f3h16PTJGTSScbE+3Vd57rMd77nzH3GkNxzfxeLYRiGAAAAAECSi6MDAAAAAHDrIEEAAAAAYCJBAAAAAGAiQQAAAABgIkEAAAAAYCJBAAAAAGAiQQAAAABgIkEAAAAAYCJBAAAAAGAiQQBwW5g3b54sFouOHTtmt2seO3ZMFotF8+bNs9s1nV2rVq3UqlUrR4cBAPgXSBAA5NmRI0c0ePBgVahQQZ6envL19VWzZs00bdo0paSkODo8u/n00081depUR4dhpV+/frJYLPL19c32vT506JAsFossFoveeecdm69/+vRpjR07Vjt27LBDtAAAZ1LE0QEAcE4rVqxQjx495OHhoT59+qhGjRpKT0/Xxo0b9cILL2jv3r368MMPHR2mXXz66afas2ePnn/+eav28uXLKyUlRW5ubg6Jq0iRIkpOTtY333yjhx56yOq5hQsXytPTU6mpqXm69unTpzVu3DiFhYWpTp06uT7vhx9+yNPrAQBuHSQIAGwWFRWlXr16qXz58lq7dq1Kly5tPvfUU0/p8OHDWrFixb9+HcMwlJqaqqJFi2Z5LjU1Ve7u7nJxcVwh1GKxyNPT02Gv7+HhoWbNmumzzz7LkiB8+umn6ty5s5YuXVogsSQnJ8vLy0vu7u4F8noAgPzDECMANnvrrbeUmJioOXPmWCUHf6lUqZKee+458/HVq1c1YcIEVaxYUR4eHgoLC9NLL72ktLQ0q/PCwsJ07733atWqVWrQoIGKFi2q//73v1q/fr0sFos+//xzjR49WqGhofLy8lJ8fLwk6ffff1fHjh3l5+cnLy8vtWzZUr/88stN7+Orr75S586dFRISIg8PD1WsWFETJkxQRkaG2adVq1ZasWKFjh8/bg7ZCQsLk5TzHIS1a9eqRYsW8vb2lr+/v+6//37t27fPqs/YsWNlsVh0+PBh9evXT/7+/vLz81P//v2VnJx809j/8vDDD+u7777TpUuXzLYtW7bo0KFDevjhh7P0j4uL0/Dhw1WzZk35+PjI19dX99xzj3bu3Gn2Wb9+ve68805JUv/+/c37/us+W7VqpRo1amjr1q2666675OXlpZdeesl87u9zEPr27StPT88s99+hQwcVL15cp0+fzvW9AgAKBhUEADb75ptvVKFCBTVt2jRX/R9//HHNnz9fDz74oIYNG6bff/9dkyZN0r59+/Tll19a9T1w4IB69+6twYMHa9CgQapSpYr53IQJE+Tu7q7hw4crLS1N7u7uWrt2re655x7Vr19fY8aMkYuLi+bOnas2bdro559/VsOGDXOMa968efLx8VFkZKR8fHy0du1avfrqq4qPj9fbb78tSXr55Zd1+fJlnTx5UlOmTJEk+fj45HjNH3/8Uffcc48qVKigsWPHKiUlRe+9956aNWumbdu2mcnFXx566CGFh4dr0qRJ2rZtm2bPnq3AwEC9+eabuXpvu3fvriFDhmjZsmUaMGCApGvVg6pVq6pevXpZ+h89elTLly9Xjx49FB4errNnz+q///2vWrZsqT///FMhISGqVq2axo8fr1dffVVPPPGEWrRoIUlWf98XLlzQPffco169eunRRx9VUFBQtvFNmzZNa9euVd++fbVp0ya5urrqv//9r3744Qd98sknCgkJydV9AgAKkAEANrh8+bIhybj//vtz1X/Hjh2GJOPxxx+3ah8+fLghyVi7dq3ZVr58eUOS8f3331v1XbdunSHJqFChgpGcnGy2Z2ZmGnfccYfRoUMHIzMz02xPTk42wsPDjbvvvttsmzt3riHJiIqKsur3T4MHDza8vLyM1NRUs61z585G+fLls/SNiooyJBlz58412+rUqWMEBgYaFy5cMNt27txpuLi4GH369DHbxowZY0gyBgwYYHXNbt26GSVKlMjyWv/Ut29fw9vb2zAMw3jwwQeNtm3bGoZhGBkZGUZwcLAxbtw4M763337bPC81NdXIyMjIch8eHh7G+PHjzbYtW7Zkube/tGzZ0pBkzJo1K9vnWrZsadW2atUqQ5Lx2muvGUePHjV8fHyMrl273vQeAQCOwRAjADb5a1hPsWLFctV/5cqVkqTIyEir9mHDhklSlrkK4eHh6tChQ7bX6tu3r9V8hB07dphDaS5cuKDY2FjFxsYqKSlJbdu21YYNG5SZmZljbH+/VkJCgmJjY9WiRQslJydr//79ubq/vztz5ox27Nihfv36KSAgwGyvVauW7r77bvO9+LshQ4ZYPW7RooUuXLhgvs+58fDDD2v9+vWKiYnR2rVrFRMTk+3wIunavIW/5m1kZGTowoUL8vHxUZUqVbRt27Zcv6aHh4f69++fq77t27fX4MGDNX78eHXv3l2enp7673//m+vXAgAULIYYAbCJr6+vpGu/UOfG8ePH5eLiokqVKlm1BwcHy9/fX8ePH7dqDw8Pz/Fa/3zu0KFDkq4lDjm5fPmyihcvnu1ze/fu1ejRo7V27dosv5Bfvnw5x2vm5K97+fuwqL9Uq1ZNq1atUlJSkry9vc32cuXKWfX7K9aLFy+a7/XNdOrUScWKFdOiRYu0Y8cO3XnnnapUqVK2ez5kZmZq2rRpev/99xUVFWU136JEiRK5ej1JCg0NtWlC8jvvvKOvvvpKO3bs0KeffqrAwMBcnwsAKFgkCABs4uvrq5CQEO3Zs8em8ywWS676ZbdiUU7P/VUdePvtt3NcijOn+QKXLl1Sy5Yt5evrq/Hjx6tixYry9PTUtm3bNGLEiBtWHuzJ1dU123bDMHJ9DQ8PD3Xv3l3z58/X0aNHNXbs2Bz7Tpw4Ua+88ooGDBigCRMmKCAgQC4uLnr++edtuucb/T1lZ/v27Tp37pwkaffu3erdu7dN5wMACg4JAgCb3Xvvvfrwww+1adMmNWnS5IZ9y5cvr8zMTB06dEjVqlUz28+ePatLly6pfPnyeY6jYsWKkq4lLe3atbPp3PXr1+vChQtatmyZ7rrrLrM9KioqS9/cJjd/3cuBAweyPLd//36VLFnSqnpgTw8//LA+/vhjubi4qFevXjn2W7JkiVq3bq05c+ZYtV+6dEklS5Y0H+f2nnMjKSlJ/fv3V0REhJo2baq33npL3bp1M1dKAgDcWpiDAMBmL774ory9vfX444/r7NmzWZ4/cuSIpk2bJuna8BdJWXYinjx5siSpc+fOeY6jfv36qlixo
t555x0lJiZmef78+fM5nvvXN/d//6Y+PT1d77//fpa+3t7euRpyVLp0adWpU0fz58+3WnZ0z549+uGHH8z3Ij+0bt1aEyZM0IwZMxQcHJxjP1dX1yzViS+++EKnTp2yavsrkfn7feTViBEjFB0drfnz52vy5MkKCwtT3759syxzCwC4NVBBAGCzihUr6tNPP1XPnj1VrVo1q52Uf/31V33xxRfq16+fJKl27drq27evPvzwQ3NYz+bNmzV//nx17dpVrVu3znMcLi4umj17tu655x5Vr15d/fv3V2hoqE6dOqV169bJ19dX33zzTbbnNm3aVMWLF1ffvn317LPPymKx6JNPPsl2aE/9+vW1aNEiRUZG6s4775SPj4+6dOmS7XXffvtt3XPPPWrSpIkGDhxoLnPq5+d3w6E//5aLi4tGjx5903733nuvxo8fr/79+6tp06bavXu3Fi5cqAoVKlj1q1ixovz9/TVr1iwVK1ZM3t7eatSo0Q3niGRn7dq1ev/99zVmzBhz2dW5c+eqVatWeuWVV/TWW2/ZdD0AQAFw7CJKAJzZwYMHjUGDBhlhYWGGu7u7UaxYMaNZs2bGe++9Z7VM6JUrV4xx48YZ4eHhhpubm1G2bFlj1KhRVn0M49oyp507d87yOn8tc/rFF19kG8f27duN7t27GyVKlDA8PDyM8uXLGw899JCxZs0as092y5z+8ssvRuPGjY2iRYsaISEhxosvvmguyblu3TqzX2JiovHwww8b/v7+hiRzydPsljk1DMP48ccfjWbNmhlFixY1fH19jS5duhh//vmnVZ+/ljk9f/68VXt2cWbn78uc5iSnZU6HDRtmlC5d2ihatKjRrFkzY9OmTdkuT/rVV18ZERERRpEiRazus2XLlkb16tWzfc2/Xyc+Pt4oX768Ua9ePePKlStW/YYOHWq4uLgYmzZtuuE9AAAKnsUwbJgJBwAAAKBQYw4CAAAAABMJAgAAAAATCQIAAAAAEwkCAAAAUEA2bNigLl26KCQkRBaLRcuXL7/pOevXr1e9evXk4eGhSpUqad68efkaIwkCAAAAUECSkpJUu3ZtzZw5M1f9o6Ki1LlzZ7Vu3Vo7duzQ888/r8cff1yrVq3KtxhZxQgAAABwAIvFoi+//FJdu3bNsc+IESO0YsUK7dmzx2zr1auXLl26pO+//z5f4qKCAAAAAORRWlqa4uPjrQ577hS/adMmtWvXzqqtQ4cO2rRpk91e459umZ2UV7hVcXQIuE20ntXT0SHgNtF+eVtHh4DbhKePt6NDwG3ix88aODqEbDny98gtL/fWuHHjrNrGjBmjsWPH2uX6MTExCgoKsmoLCgpSfHy8UlJSVLRoUbu8zt/dMgkCAAAA4GxGjRqlyMhIqzYPDw8HRWMfJAgAAABAHnl4eORrQhAcHKyzZ89atZ09e1a+vr75Uj2QSBAAAADg5CxuFkeHkG+aNGmilStXWrWtXr1aTZo0ybfXZJIyAAAAUEASExO1Y8cO7dixQ9K1ZUx37Nih6OhoSdeGLPXp08fsP2TIEB09elQvvvii9u/fr/fff1+LFy/W0KFD8y1GKggAAABwai5FnKeC8Mcff6h169bm47/mL/Tt21fz5s3TmTNnzGRBksLDw7VixQoNHTpU06ZNU5kyZTR79mx16NAh32IkQQAAAAAKSKtWrXSjbciy2yW5VatW2r59ez5GZY0EAQAAAE7N4saoeXvi3QQAAABgIkEAAAAAYGKIEQAAAJyaM01SdgZUEAAAAACYqCAAAADAqRXmjdIcgQoCAAAAABMJAgAAAAATQ4wAAADg1JikbF9UEAAAAACYqCAAAADAqTFJ2b6oIAAAAAAwkSAAAAAAMDHECAAAAE6NScr2RQUBAAAAgIkKAgAAAJyaxZUKgj1RQQAAAABgooIAAAAAp+ZCBcGuqCAAAAAAMJEgAAAAADAxxAgAAABOzeLCECN7ooIAAAAAwEQFAQAAAE7N4sp33vbEuwkAAADARIIAAAAAwJTnIUZbt27Vvn37JEkRERGqV6+e3YICAAAAcot9EOzL5gTh3Llz6tWrl9avXy9/f39J0qVLl9S6dWt9/vnnKlWqlL1jBAAAAFBAbB5i9MwzzyghIUF79+5VXFyc4uLitGfPHsXHx+vZZ5/NjxgBAACAHFlcLA47CiObKwjff/+9fvzxR1WrVs1si4iI0MyZM9W+fXu7BgcAAACgYNmcIGRmZsrNzS1Lu5ubmzIzM+0SFAAAAJBbzEGwL5uHGLVp00bPPfecTp8+bbadOnVKQ4cOVdu2be0aHAAAAICCZXOCMGPGDMXHxyssLEwVK1ZUxYoVFR4ervj4eL333nv5ESMAAACAAmLzEKOyZctq27Zt+vHHH7V//35JUrVq1dSuXTu7BwcAAADcjIUhRnaVp30QLBaL7r77bt199932jgcAAACAA+UqQZg+fbqeeOIJeXp6avr06Tfsy1KnAAAAKEgWF5tHzeMGcpUgTJkyRY888og8PT01ZcqUHPtZLBYSBAAAAMCJ5SpBiIqKyvbPAAAAAAoXm+sx48ePV3Jycpb2lJQUjR8/3i5BAQAAALnFTsr2ZXOCMG7cOCUmJmZpT05O1rhx4+wSFAAAAADHsHkVI8MwZLFkzZZ27typgIAAuwQFAAAA5BY7KdtXrhOE4sWLy2KxyGKxqHLlylZJQkZGhhITEzVkyJB8CRIAAABAwch1gjB16lQZhqEBAwZo3Lhx8vPzM59zd3dXWFiYmjRpki9BAgAAADkprHMBHCXXCULfvn0lSeHh4WratKnc3NzyLSgAAAAAjmHzHISWLVuaf05NTVV6errV876+vv8+KgAAAAAOYXOCkJycrBdffFGLFy/WhQsXsjyfkZFhl8AAAACA3GAnZfuy+d184YUXtHbtWn3wwQfy8PDQ7NmzNW7cOIWEhGjBggX5ESMAAACAAmJzBeGbb77RggUL1KpVK/Xv318tWrRQpUqVVL58eS1cuFCPPPJIfsQJAAAAZItJyvZlcwUhLi5OFSpUkHRtvkFcXJwkqXnz5tqwYYN9owMAAABQoGxOECpUqKCoqChJUtWqVbV48WJJ1yoL/v7+dg0OAAAAQMGyeYhR//79tXPnTrVs2VIjR45Uly5dNGPGDF25ckWTJ0/OjxgBAACAHLGTsn3ZnCAMHTrU/HO7du20f/9+bd26VZUqVVKtWrXsGhwAAACAgmVzgvBP5cuXV/ny5SVJS5Ys0YMPPvivgwIAAAByi0nK9mXTHISrV69qz549OnjwoFX7V199pdq1a7OCEQAAAODkcp0g7NmzR5UqVVLt2rVVrVo1de/eXWfPnlXLli01YMAA3XPPPTpy5Eh+xgoAAABkYXFxcdhRGOV6iNGIESNUqVIlzZgxQ5999pk+++wz7du3TwMHDtT333+vokWL5mecAAAAAApArhOELVu26IcfflCdOnXUokULffbZZ3rppZf02GOP5Wd8AAAAAApQrhOE2NhYhYSESJL8/Pzk7e2txo0b51tgAAAAQG4wSdm+cp0gWCwWJSQkyNPT
U4ZhyGKxKCUlRfHx8Vb9fH197R4kAAAAgIKR6wTBMAxVrlzZ6nHdunWtHlssFmVkZNg3QgAAAOAGqCDYV64ThHXr1uVnHAAAAABuAblOEFq2bJmfcQAAAAC4BfzrnZQBAAAAR2KIkX0Vzt0dAAAAAOQJFQQAAAA4tcK6o7GjkCA4mYDmDVRh2ED51ashz5BA/fHAf3T26zWODgtOZtG2Q5q/eb8uJKWqcqC/RrSrpxqlS+TYPyE1XTN+3q21B0/qcmq6Svt6aXibumpRMcTscy4hWdN+2qVfjp5R6tUMlfX30dh7Gqp66YCCuCXcwgY+EqYu7YNVzLuIdu+L1zvvH9LJMyk59u96T2l1vSdEpYM8JUlR0cma9/lx/bY1zuzz3sTaqlvT3+q85d+d1jvvH8qXe4Bz6PtgiDq1KSkf7yLaeyBR0z4+rlMxaTn279KulLrcXUpBJT0kScdPpuiTZae1Zef1Jdw7tympNs1KqFKYl7y9XHX/wO1KSmbFRhRuNicIAwYM0LRp01SsWDGr9qSkJD3zzDP6+OOP7RYcsnL19lL8rgM6MW+pGiyZ6ehw4IRW7YvWu+t26OX29VWjdAl9+sdB/WfxT1r+eCcFeHtm6X8lI0NDFq9XgJen3r6/qQKLeen05SQV83Qz+8SnpqvfwjW6s1ygZvS4S8WLeij6YqJ8Pd0L8tZwC3rkgbJ68N5QvT51v86cTdXjj4Rp8viaevQ/W5R+xcj2nPOx6Zo1P0onT6fIYpHuaRukSS9X14DntyoqOtns9/X3pzV74THzcWpaZn7fDm5hPbsEq1vHQL31wTGdOZ+m/j1C9MbIyhrwwh5dyemzFpeu2Z+d0qmYVElS+7tKavzwShoy6k8dP3mtzcPDRVt2XtaWnZf1eO8yBXY/sI2LK3MQ7Mnmesz8+fOVkpL1m5+UlBQtWLDALkEhZ+dXbdDBMVN19qsfHR0KnNT//jig7rUq6P6aFVSxpJ9e7tBAnm5FtHx3VLb9l++KUnxquiZ3a646ZUopxM9bDcoFqkpgcbPP3N/3KdjXS+M6NVKN0iUU6u+jJuHBKlvcp6BuC7eoHveFasHi49r4+wUdOZak16bsV4kAD7VoXDLHc37ZckG/bY3TyTMpOnE6RR9+ckwpqRmKqGK9EWdqWqbiLl0xj+QUvtW9nXW/J1ALvzyjX7deUlR0it58/5hKFHdTswb+OZ7z27bL2rzjsk7FpOlUTJrmLj6llNRMVat0/f+uZd+d0+dfx2jfoaQCuAvg1pDrCkJ8fLwMw5BhGOaOyn/JyMjQypUrFRgYmC9BArCPKxkZ2hdzUQMaVzPbXCwWNSofpF2nY7M956cjp1UrpKTeWL1V6w+fUnEvD91Trbz6Naoq1/8f8/nT4dNqGhasF776RVtPnFegT1E9VLeSuteuWCD3hVtTSJCnSgZ4aMuOi2ZbUnKG/jwYrxpVfbXm5/M3vYaLi9S6WSl5erpq7/54q+fubhWo9q2DFHcxXb9svqB5i44rjSrCbal0oLtKFHfXtj3XPyNJKRnadyRJEXf4aP2mizc4+xoXi3RX4+Ly9HDRn4cS8zNc4JaX6wTB399fFotFFovFakflv1gsFo0bN86uwQGwr4vJ6cowDAV4WQ8lKuHtqWNx8dmec+pSorZcTtI9EeX13oN36cTFRE1avVVXMzM1uFkNs88XOw7r0TuraGDjCO09E6e31mxXEVcX3VcjPN/vC7emgOLXhphdvHTFqv3ipXTzuZxUKO+tWW/Xlbu7i1JSMvTS63t17MT14UWrfzqnmHOpio1LV8Uwbz3Zr4LKhRbVy5P+tP+N4JZX3O/akMeLl69atV+6fEUB/m7ZnWIKL1tU08dXlbubi1JSMzR28hFFn0rNt1iRP1jm1L5s2knZMAy1adNGS5cuVUDA9YmH7u7uKl++vEJCQm5whevS0tKUlmY9aeiKkSk3CzPQgVtN5v8nFK90aCBXFxdFBAfoXGKKFmzebyYImYYUEVxcz9xVS5JUNai4Dsde1pIdR0gQbiN3twzUC09d/wLpxfG783yt6FPJ6v/cH/LxKqJWzUrp5aFV9MyonWaS8PWqM2bfo8eTdOFiuqa/XlshwZ46HcMvd4Vdm2YBGvp4efPxy2/lfXL6idOpGjzyT3l7uequRsX14pNhihx/gCQBtzWbd1KOiopSuXLlZLHkPVObNGlSlmpDb0uAHnHNeUwqgH+vuJe7XC0WxSVb/+C7kJSqEtlMUJakkt5FVcTVxRxOJEnhJXwVm5SqKxkZcnN1VUkfT1UoYT0+PLyEr9YcPGn/m8Ata+PmC/rz4B/mY3e3a5+Z4v5uunAx3Wwv7u+uw0dvPITj6lVDp85c+5weOJKoancUU4/7QvX2zOx/EfzzwLUKWJnSRUkQbgObtl7S/sPX5wS4uV37naS4XxHF/a1i5e/npiPHkrOc/3dXMwydPnvtS8tDUcmqUsFb3TsGaeqc4/kQOfILy5zal83v5r59+/TLL7+Yj2fOnKk6dero4Ycf1sWLNx/jJ0mjRo3S5cuXrY6HXFgKEchvbq6uqhZcXL8fP2u2ZRqGNh8/q1oh2SfodcqU1ImLCco0rq8CEh2XoJLennJzdb3WJ7Skjl9MsDovOi5BpX298uEucKtKScnQqTOp5hEVnazYuDQ1qH19QrtXUVdFVPbVnv3ZD2nLicUiubnl/CPrjgrXJpX+PRFB4ZWSmqnTZ9PM4/jJVF24mK66Na5/UeFV1EXVKnrbPJ/A4nI94QBuVzYnCC+88ILi46/9x757925FRkaqU6dOioqKUmRkZK6u4eHhIV9fX6uD4UW54+rtJd/aVeVbu6okySu8jHxrV5Vn2dIOjgzO4tEGVfTlzqP6ek+Ujl6I18Qf/lDKlau6v+a1oUCjV/ym6T/tMvv3qFNJ8anpemvNNh2PS9DPR05rzm9/qme9O/52zcraffqC5mz6U9EXE/Tdn8e1dNcR9axbqcDvD7eWL74+pb49y6lZwxKqUN5boyOr6kJcmn7+7fqk+Kmv1VL3zteHqA7uE67a1f0UHOihCuW9NbhPuOrW9NcP689JkkKCPdW3ZzlVqeij4EAPNWtYQqOHVtX2PZd05Bgrzdyuln13To90La0m9f0UXraoRjwZrgsXr+iXPy6Zfd56ubLub1/KfDywV6hqVvVRUEl3hZctqoG9QlW7WjGt+eX6nhvF/YqoYvmiCgm+tldCeNmiqli+qIp5uxbYvQEFzeZ9EKKiohQRESFJWrp0qbp06aKJEydq27Zt6tSpk90DhDW/+jXUZM0n5uOId16SJJ1YsEy7Bo5yVFhwIh2qldPFlDR9sHGPLiSlqkqgv2b2aGkOMYqJT5bL34YQBvt6aWaPlnp37XY9NPd7BRYrqofrV1a/RlXNPtVLl9C7XZvrvQ279OGvexXq560X2tRVp+phBX17uMUsXHpCnp6uevHpyvLxLqLdf17WsDG7rfZACA0uKn/f6xNJi/u5afTQqioR4K6kpKs6cixJkWN264//Xw3p6lVDDeoU10P3lZGnp6vOxaZq/a+xmr+
IISG3s0XfxMjTw0VDHw+Tj5er9hxI1Mg3DlrtgRAS5CG/Ytc/a/6+RTTiP+EK8HdTUnKGoqJTNPKNQ9q2+3qFq0u7QPV58HoCO3Xstf/73vogSj9suFAAd4bcYJKyfVkMw8h+95AcBAQEaOPGjYqIiFDz5s3Vp08fPfHEEzp27JgiIiKUnHzjsX45WeFWJU/nAbZqPauno0PAbaL98raODgG3CU8fb0eHgNvEj581cHQI2Tr2+P0Oe+2w2V857LXzi80VhObNmysyMlLNmjXT5s2btWjRIknSwYMHVaYMOwwCAACgYFFBsC+bB/7PmDFDRYoU0ZIlS/TBBx8oNDRUkvTdd9+pY8eOdg8QAAAAQMGxuYJQrlw5ffvtt1nap0yZYpeAAAAAAFuwzKl95endPHLkiEaPHq3evXvr3Llrq0p899132rt3r12DAwAAAFCwbE4QfvrpJ9WsWVO///67li1bpsTEa+sL79y5U2PGjLF7gAAAAAAKjs0JwsiRI/Xaa69p9erVcnd3N9vbtGmj3377za7BAQAAADdjcbE47MiLmTNnKiwsTJ6enmrUqJE2b958w/5Tp05VlSpVVLRoUZUtW1ZDhw5Vamr+7Rpvc4Kwe/dudevWLUt7YGCgYmNjszkDAAAAgCQtWrRIkZGRGjNmjLZt26batWurQ4cO5rD9f/r00081cuRIjRkzRvv27dOcOXO0aNEivfTSS/kWo80Jgr+/v86cOZOlffv27eaKRgAAAEBBsbi4OOyw1eTJkzVo0CD1799fERERmjVrlry8vPTxxx9n2//XX39Vs2bN9PDDDyssLEzt27dX7969b1p1+DdsvqtevXppxIgRiomJkcViUWZmpn755RcNHz5cffr0yY8YAQAAAKeXnp6urVu3ql27dmabi4uL2rVrp02bNmV7TtOmTbV161YzITh69KhWrlypTp065VucNi9zOnHiRD311FMqW7asMjIyFBERoYyMDD388MMaPXp0fsQIAAAA3JLS0tKUlpZm1ebh4SEPD48sfWNjY5WRkaGgoCCr9qCgIO3fvz/b6z/88MOKjY1V8+bNZRiGrl69qiFDhtxaQ4zc3d310Ucf6ciRI/r222/1v//9T/v379cnn3wiV1fX/IgRAAAAyJnF4rBj0qRJ8vPzszomTZpkt1tbv369Jk6cqPfff1/btm3TsmXLtGLFCk2YMMFur/FPNlcQ/lKuXDmVK1fOnrEAAAAATmXUqFGKjIy0asuueiBJJUuWlKurq86ePWvVfvbsWQUHB2d7ziuvvKLHHntMjz/+uCSpZs2aSkpK0hNPPKGXX35ZLvmwSZzNCUJGRobmzZunNWvW6Ny5c8rMzLR6fu3atXYLDgAAALiZvC43ag85DSfKjru7u+rXr681a9aoa9eukqTMzEytWbNGTz/9dLbnJCcnZ0kC/hq1YxhG3gO/AZsThOeee07z5s1T586dVaNGDVksjvsLAQAAAJxJZGSk+vbtqwYNGqhhw4aaOnWqkpKS1L9/f0lSnz59FBoaag5T6tKliyZPnqy6deuqUaNGOnz4sF555RV16dIl34b325wgfP7551q8eHG+zpwGAAAACqOePXvq/PnzevXVVxUTE6M6dero+++/NycuR0dHW1UMRo8eLYvFotGjR+vUqVMqVaqUunTpotdffz3fYrQYNtYmQkJCtH79elWuXNmugaxwq2LX6wE5aT2rp6NDwG2i/fK2jg4BtwlPH29Hh4DbxI+fNXB0CNk6PbS3w147ZMpnDnvt/GLzrIZhw4Zp2rRp+TbmCQAAAIDj5GqIUffu3a0er127Vt99952qV68uNzc3q+eWLVtmv+gAAACAm3DkJOXCKFcJgp+fn9Xjbt265UswAAAAABwrVwnC3Llz8zsOAAAAIE8s+bAXwO2MdxMAAACAyeZlTuvWrZvt3gcWi0Wenp6qVKmS+vXrp9atW9slQAAAAAAFx+YKQseOHXX06FF5e3urdevWat26tXx8fHTkyBHdeeedOnPmjNq1a6evvvoqP+IFAAAArFhcLA47CiObKwixsbEaNmyYXnnlFav21157TcePH9cPP/ygMWPGaMKECbr//vvtFigAAACA/GdzBWHx4sXq3TvrZhS9evXS4sWLJUm9e/fWgQMH/n10AAAAwE1QQbAvmxMET09P/frrr1naf/31V3l6ekqSMjMzzT8DAAAAcB42DzF65plnNGTIEG3dulV33nmnJGnLli2aPXu2XnrpJUnSqlWrVKdOHbsGCgAAACD/2ZwgjB49WuHh4ZoxY4Y++eQTSVKVKlX00Ucf6eGHH5YkDRkyRE8++aR9IwUAAACywz4IdmVzgiBJjzzyiB555JEcny9atGieAwIAAADgOHlKEAAAAIBbRXZ7dCHvcpUgBAQE6ODBgypZsqSKFy9+w7+EuLg4uwUHAAAAoGDlKkGYMmWKihUrJkmaOnVqfsYDAAAA2MTCHAS7ylWC0Ldv32z/DAAAAKBwyfUchPj4+Fz18/X1zXMwAAAAABwr1wmCv7//DeceGIYhi8WijIwMuwQGAAAA5EZh3dHYUXKdIKxbt878s2EY6tSpk2bPnq3Q0NB8CQwAAABAwct1gtCyZUurx66urmrcuLEqVKhg96AAAACAXGOSsl3xbgIAAAAwkSAAAAAAMP2rnZTZtQ4AAACOxiRl+8p1gtC9e3erx6mpqRoyZIi8vb2t2pctW2afyAAAAAAUuFwnCH5+flaPH330UbsHAwAAANjKYmHUvD3lOkGYO3dufsYBAAAA4Bbwr+YgAAAAAA7HHAS7oh4DAAAAwESCAAAAAMDEECMAAAA4NQs7KdsV7yYAAAAAExUEAAAAODU2SrMvKggAAAAATCQIAAAAAEwMMQIAAIBzYydlu+LdBAAAAGCiggAAAACnxiRl+6KCAAAAAMBEBQEAAADOjY3S7Ip3EwAAAICJBAEAAACAiSFGAAAAcGoWC5OU7YkKAgAAAAATFQQAAAA4NyYp2xXvJgAAAAATCQIAAAAAE0OMAAAA4NTYSdm+qCAAAAAAMFFBAAAAgHOz8J23PfFuAgAAADBRQQAAAIBzYw6CXVFBAAAAAGAiQQAAAABgYogRAAAAnJqFScp2xbsJAAAAwHTLVBBaz+rp6BBwm1g3ZJGjQ8BtYsjacY4OAbeJK1eZoInbHJOU7YoKAgAAAAATCQIAAAAA0y0zxAgAAADIC4sL33nbE+8mAAAAABMVBAAAADg3C5OU7YkKAgAAAAATFQQAAAA4N+Yg2BXvJgAAAAATCQIAAAAAE0OMAAAA4NyYpGxXVBAAAAAAmKggAAAAwKmxUZp98W4CAAAAMJEgAAAAADAxxAgAAADOzcJ33vbEuwkAAADARAUBAAAAzs2FZU7tiQoCAAAAABMJAgAAAAATQ4wAAADg1CxMUrYr3k0AAAAAJioIAAAAcG5MUrYrKggAAAAATDZXEFJSUmQYhry8vCRJx48f15dffqmIiAi1b9/e7gECAAAAN8QcBLuy+d28//77tWDBAknSpUuX1KhRI7377ru6//779c
EHH9g9QAAAAKAwmTlzpsLCwuTp6alGjRpp8+bNN+x/6dIlPfXUUypdurQ8PDxUuXJlrVy5Mt/iszlB2LZtm1q0aCFJWrJkiYKCgnT8+HEtWLBA06dPt3uAAAAAQGGxaNEiRUZGasyYMdq2bZtq166tDh066Ny5c9n2T09P1913361jx45pyZIlOnDggD766COFhobmW4w2DzFKTk5WsWLFJEk//PCDunfvLhcXFzVu3FjHjx+3e4AAAADADVmcZ5Ly5MmTNWjQIPXv31+SNGvWLK1YsUIff/yxRo4cmaX/xx9/rLi4OP36669yc3OTJIWFheVrjDZXECpVqqTly5frxIkTWrVqlTnv4Ny5c/L19bV7gAAAAMCtKi0tTfHx8VZHWlpatn3T09O1detWtWvXzmxzcXFRu3bttGnTpmzP+frrr9WkSRM99dRTCgoKUo0aNTRx4kRlZGTky/1IeUgQXn31VQ0fPlxhYWFq1KiRmjRpIulaNaFu3bp2DxAAAAC4IRcXhx2TJk2Sn5+f1TFp0qRsw4yNjVVGRoaCgoKs2oOCghQTE5PtOUePHtWSJUuUkZGhlStX6pVXXtG7776r1157ze5v419sHmL04IMPqnnz5jpz5oxq165ttrdt21bdunWza3AAAADArWzUqFGKjIy0avPw8LDb9TMzMxUYGKgPP/xQrq6uql+/vk6dOqW3335bY8aMsdvr/J1NCcKVK1dUtGhR7dixI0u1oGHDhnYNDAAAALjVeXh45DohKFmypFxdXXX27Fmr9rNnzyo4ODjbc0qXLi03Nze5urqabdWqVVNMTIzS09Pl7u6e9+BzYNMQIzc3N5UrVy5fxzwBAAAANrG4OO6wgbu7u+rXr681a9aYbZmZmVqzZo05bP+fmjVrpsOHDyszM9NsO3jwoEqXLp0vyYGUhzkIL7/8sl566SXFxcXlRzwAAABAoRUZGamPPvpI8+fP1759+/Tkk08qKSnJXNWoT58+GjVqlNn/ySefVFxcnJ577jkdPHhQK1as0MSJE/XUU0/lW4w2z0GYMWOGDh8+rJCQEJUvX17e3t5Wz2/bts1uwQEAAAA35eI8y5z27NlT58+f16uvvqqYmBjVqVNH33//vTlxOTo6Wi4u17/DL1u2rFatWqWhQ4eqVq1aCg0N1XPPPacRI0bkW4w2Jwhdu3bNhzAAAACA28PTTz+tp59+Otvn1q9fn6WtSZMm+u233/I5qutsThDya7Y0AAAAkCc2zgXAjeXp3bx06ZJmz56tUaNGmXMRtm3bplOnTtk1OAAAAAAFy+YKwq5du9SuXTv5+fnp2LFjGjRokAICArRs2TJFR0drwYIF+REnAAAAgAJgcwUhMjJS/fr106FDh+Tp6Wm2d+rUSRs2bLBrcAAAAMBNWSyOOwohmxOELVu2aPDgwVnaQ0NDc9wiGgAAAIBzsHmIkYeHh+Lj47O0Hzx4UKVKlbJLUAAAAECuuTBJ2Z5sfjfvu+8+jR8/XleuXJEkWSwWRUdHa8SIEXrggQfsHiAAAACAgmNzgvDuu+8qMTFRgYGBSklJUcuWLVWpUiUVK1ZMr7/+en7ECAAAAKCA2DzEyM/PT6tXr9bGjRu1a9cuJSYmql69emrXrl1+xAcAAADcWCGdLOwoNicIf2nevLmaN29uz1gAAAAAOFieEoQ1a9ZozZo1OnfunDIzM62e+/jjj+0SGAAAAJAr7KRsVzYnCOPGjdP48ePVoEEDlS5dWhZKOgAAAEChYXOCMGvWLM2bN0+PPfZYfsQDAAAA2IZlTu3K5nczPT1dTZs2zY9YAAAAADiYzQnC448/rk8//TQ/YgEAAADgYLkaYhQZGWn+OTMzUx9++KF+/PFH1apVS25ublZ9J0+ebN8IAQAAgBthTqxd5SpB2L59u9XjOnXqSJL27Nlj94AAAAAAOE6uEoR169bldxwAAABA3rDMqV3Z/G4OGDBACQkJWdqTkpI0YMAAuwQFAAAAwDFsThDmz5+vlJSULO0pKSlasGCBXYICAAAA4Bi53gchPj5ehmHIMAwlJCTI09PTfC4jI0MrV65UYGBgvgQJAAAA5IhJynaV6wTB399fFotFFotFlStXzvK8xWLRuHHj7BocAAAAgIKV6wRh3bp1MgxDbdq00dKlSxUQEGA+5+7urvLlyyskJCRfggQAAAByxE7KdpXrBKFly5aSpKioKJUrV04WSjkAAABAoZPrBOEv586d07Rp03Tw4EFJUpUqVdS7d281aNDA7sEBAAAAN2PwxbVd2VSPefHFF9WoUSPNnj1bJ0+e1MmTJ/Xhhx+qUaNGGjFiRH7FCAAAAKCA5DpBmD9/vt577z1Nnz5dFy5c0I4dO7Rjxw7FxcVpypQpmj59OsucAgAAAE4u10OMZs6cqYkTJ+rpp5+2andzc9Ozzz6rq1evasaMGerTp4/dgwQAAAByxE7KdpXrd3Pv3r26//77c3y+a9eu2rt3r12CAgAAAOAYua4guLq6Kj09Pcfnr1y5IldXV7sEBQAAAOQaFQS7yvW7Wa9ePS1cuDDH5z/55BPVq1fPLkEBAAAAcIxcVxCGDx+url27Ki0tTcOGDVNQUJAkKSYmRu+++66mTp2qL7/8Mt8CBQAAAJD/cp0g3HvvvZoyZYqGDx+ud999V35+fpKky5cvq0iRInrnnXd077335lugAAAAQHbYB8G+bNoo7ZlnnlG3bt30xRdf6NChQ5KkypUr64EHHlDZsmXzJUAAAAAABcfmnZTLlCmjoUOH5kcsAAAAgO2YpGxXNicIyD+Lth3S/M37dSEpVZUD/TWiXT3VKF0ix/4Jqema8fNurT14UpdT01Xa10vD29RVi4ohZp9zCcma9tMu/XL0jFKvZqisv4/G3tNQ1UsHFMQtwYkFNG+gCsMGyq9eDXmGBOqPB/6js1+vcXRYuIVtWbtQm1bNUeLlWAWVraqOvUcrtEKtHPv/+cf3Wr98mi7FnlJAUHm1fWC47qjV0nx+wuNVsz2v7YMvqGnHgVZtV6+k6+OJD+nsif0a9OqXCi5XzT43hVvS1vUL9fsPc5QUf16BZarq7p6vKCQ858/a/q3facPX03T5wikFBIapVbfhqljz+mft23kjtec363mU4RHN1fPZOebjJe8P0bkT+5WUcEGeXn4Kq9ZErboNVzH/IPvfIOBgJAi3iFX7ovXuuh16uX191ShdQp/+cVD/WfyTlj/eSQHenln6X8nI0JDF6xXg5am372+qwGJeOn05ScU83cw+8anp6rdwje4sF6gZPe5S8aIeir6YKF9P94K8NTgpV28vxe86oBPzlqrBkpmODge3uL2bV2r14jfU6dGxCq1QW7//OF+fTn1c/3ntO3n7Zv2i48ThbVr24TC16R6pO2q10p7N32rxzKc16NWlCgytLEka+u7PVucc3r1B38wfrWr122e53polb6uYX6DOntifPzeIW8a+P1Zq7ZJJ6vDwOIWE1daWtfO16L2BemLs99l+1k4e2aav5gxTq66Rqliztf7c8o2WznpK/V9aplL//1mTpArVW6hTn
0nm4yJFrH9WlqvcWE06DpGPXyklXDqrdUvf0vIPn9NjL36efzeL3GMOgl1Rj7lF/O+PA+peq4Lur1lBFUv66eUODeTpVkTLd0dl23/5rijFp6ZrcrfmqlOmlEL8vNWgXKCqBBY3+8z9fZ+Cfb00rlMj1ShdQqH+PmoSHqyyxX0K6rbgxM6v2qCDY6bq7Fc/OjoUOIHfVs9T3RY9VKf5AyoVUkmdHx0nN3dP7di4NNv+m3/8RJVqNFfTjgNVKqSiWnd9TqXLR2jL2uvLafv4lbI6DuxYq7AqjVS8lPWct8O7N+jI3l/U7qEX8/UecWvY/ONc1W72kGo1fUAlQyqp48Pj5ObmqV2/Zv9Z+2PtAlWo3kKN2j+ukqUr6q77nldwuQhtXf8/q36uRdytPm+e3n5Wzzds10+hFerIr0SoylSsp8YdBulU1A5lZFzJt3sFHIUE4RZwJSND+2IuqlHY9TKli8WiRuWDtOt0bLbn/HTktGqFlNQbq7eq7YzlevDj7zRn05/KyMy83ufwaUUEBeiFr35RmxnL1WveKi3beSTf7wfA7SXjarrOHN+r8IimZpvFxUXh1Zro5NEd2Z5z8ugOhVdratVWoXoznTySff/Ey7E6vPsn1WnxQJb2bxe8oq6Pvyk396zVVhQuGVfTFRO9V2HVrD9rYdWa6tTR7dmec/roDoVVbWLVFh7RXKf+8dmMPrhZ019oog/HdNCqT8coJfFijnGkJF3S3s3fqEyFunJ1dcuxH+Cs8jTE6NKlS1qyZImOHDmiF154QQEBAdq2bZuCgoIUGhpq7xgLvYvJ6cowDAV4Wf9wK+HtqWNx8dmec+pSorZcTtI9EeX13oN36cTFRE1avVVXMzM1uFkNs88XOw7r0TuraGDjCO09E6e31mxXEVcX3VcjPN/vC8DtITnxoozMDPn8Y3iHt29JxcZkXwVNvBybZTiIj29JJV3O/kuRXb8ul7uHt6rVuz68yDAMfT13lOq37KWQsJq6FHvyX94JbnV/fdb++dnxLlZCF2KOZntOYnysvH1LZumfFH/9s1ahegtVqXu3/EqW0aXzJ/TT8sla/N4gPTZikVxcXM1+65a9rW3rF+pKeopCwuuox1Oz7Hh3+Fdc+M7bnmxOEHbt2qV27drJz89Px44d06BBgxQQEKBly5YpOjpaCxYsuOk10tLSlJaWZtWWceWqPNyYEpFbmf+fULzSoYFcXVwUERygc4kpWrB5v5kgZBpSRHBxPXPXtYlbVYOK63DsZS3ZcYQEAYBT2fHLUtVsfK+KuHmYbVvWfKL01CQ16/SEAyNDYRBxZ2fzz4GhVRQYWkWzXmmn6IObraoPjdoPVO1mD+ryhdP6ZcUMfTtvhB586r+yMP4dhYzN6VZkZKT69eunQ4cOydPz+jfenTp10oYNG3J1jUmTJsnPz8/qeGflL7aGUmgU93KXq8WiuORUq/YLSakqkc0EZUkq6V1U5QKKyfVvGXN4CV/FJqXqSkbGtT4+nqpQwtfqvPASvoqJT7bzHQC4nXn5FJfFxVWJ8Res2pPiY+XjVzLbc3z8SirpH/0T42PlnU3/6IN/6EJMlOq06GHVHrX/d508skMTh9TSa09U14yXOkiSZr/2oL6aM+Lf3BJuUX991v752UlKuJClSvAXH9+SVtWCm/WXJP9SZVXUp7gunjv+j9cPUEBQuMIjmum+x6foyJ6fdDpqR95uBnZlWCwOOwojmxOELVu2aPDgwVnaQ0NDFRMTk6trjBo1SpcvX7Y6hndqZmsohYabq6uqBRfX78fPmm2ZhqHNx8+qVkj2/4HVKVNSJy4mKNMwzLbouASV9PaUm+u1cmid0JI6fjHB6rzouASV9vXKh7sAcLtyLeKu0uWr69i+TWabkZmpqP2/qUyFOtmeU6ZCHUX9rb8kRf35q8pUzNp/+8YlKl2+uoLLWi972rH3y3pi7HI9MeZLPTHmS/V+7r+SpAcGT1brbuzXUxi5FnFXcLnqOrbf+rN2fP8mhVaom+05IRXq6Nj+36zaju37VaE5fDYlKf5ijFKSLsnHr1SOfQzj2py/q1fSbbgDwDnYnCB4eHgoPj7ruPiDBw+qVKmc/yH98xq+vr5Wx+0+vOjRBlX05c6j+npPlI5eiNfEH/5QypWrur/mtaFAo1f8puk/7TL796hTSfGp6XprzTYdj0vQz0dOa85vf6pnvTv+ds3K2n36guZs+lPRFxP03Z/HtXTXEfWsW6nA7w/Ox9XbS761q8q39rVfyrzCy8i3dlV5li3t4MhwK2p8dz9t2/CFdv7ypc6fPqKV/xurK2kpqt2suyRp+ZwRWrP0XbN/w3aP6cjejdq06mPFnjmqn756T6eP7dWdbR6xum5aSqL2/bFKdf9RPZAkvxIhCgytbB4lgsIkScVLlZNvQHD+3SwcqmG7/tq5cbF2b/pSsWeOaNVnY5WenqJaTa991r6Z+6LWf3n9s9agTR9F7f1Zv6/+WBdijujnb97TmeN7VL/Vo5Kk9NQkrV36pk4d3aFLsSd1bP8mLf3gPypeqrzCI1pIkk5H7dTWdf/T2RP7dPnCKR3bv0lfz46Uf6lyOSYmgDOz+bfy++67T+PHj9fixYslSRaLRdHR0RoxYoQeeOCBm5yNnHSoVk4XU9L0wcY9upCUqiqB/prZo6U5xCgmPlkufytjBft6aWaPlnp37XY9NPd7BRYrqofrV1a/Rte/YateuoTe7dpc723YpQ9/3atQP2+90KauOlUPK+jbgxPyq19DTdZ8Yj6OeOclSdKJBcu0a+AoR4WFW1T1hp2UnBinn756T4nx5xVUtpoefv4jc4hR/IXTVuO0y1aqp26D3tG6L6dq3ZdTFBAYpoeemmHugfCXvZtXyJCh6g07C5Ckag06KTkhTj9/M/3/N0qrpp7PzDaHDMXHnZHlb7vqlqlYT/cNfEcbvp6qDV9NVvHAMD0wZKa5B4LFxVXnTx3Unt+WKzU5QT5+gQqPaKa77ntORdyu7YVQxN1TB3b8oJ+/fU9X0pLl41dKFaq3UNN7/mP2gYOxk7JdWQzjb2NUcuHy5ct68MEH9ccffyghIUEhISGKiYlRkyZNtHLlSnl7e+cpkOQ5r+bpPMBW64YscnQIuE1cXMumXSgYV64WznHQuPX0b+3oCLKXtGm5w17bu0lXh712frG5guDn56fVq1dr48aN2rVrlxITE1WvXj21a9cuP+IDAAAAbsiggmBXeR7437x5czVv3tyesQAAAABwMJsThOnTp2fbbrFY5OnpqUqVKumuu+6Sq6trtv0AAAAAuyqky406is0JwpQpU3T+/HklJyerePHikqSLFy/Ky8tLPj4+OnfunCpUqKB169apbNmydg8YAAAAQP6xecDWxIkTdeedd+rQoUO6cOGCLly4oIMHD6pRo0aaNm2aoqOjFRwcrKFDWYMaAAAAcDY2VxBGjx6tpUuXqmLFimZbpUqV9M477+iBBx7Q0aNH9dZbb7HkKQAAAAoEk5Tty+Z388yZM7p69WqW9qtXr5o7KYeEhCghISFLHwAAAAC3NpsThNat
W2vw4MHavn272bZ9+3Y9+eSTatOmjSRp9+7dCg8Pt1+UAAAAQE4sFscdhZDNCcKcOXMUEBCg+vXry8PDQx4eHmrQoIECAgI0Z84cSZKPj4/efffdm1wJAAAAwK3G5jkIwcHBWr16tfbv36+DBw9KkqpUqaIqVaqYfVq3vkW32QMAAABwQ3neKK1q1aqqWrWqPWMBAAAAbMckZbvKU4Jw8uRJff3114qOjlZ6errVc5MnT7ZLYAAAAAAKns0Jwpo1a3TfffepQoUK2r9/v2rUqKFjx47JMAzVq1cvP2IEAAAAcmQU0snCjmJzPWbUqFEaPny4du/eLU9PTy1dulQnTpxQy5Yt1aNHj/yIEQAAAEABsTlB2Ldvn/r06SNJKlKkiFJSUuTj46Px48frzTfftHuAAAAAAAqOzQmCt7e3Oe+gdOnSOnLkiPlcbGys/SIDAAAAcsPi4rijELJ5DkLjxo21ceNGVatWTZ06ddKwYcO0e/duLVu2TI0bN86PGAEAAAAUEJsThMmTJysxMVGSNG7cOCUmJmrRokW64447WMEIAAAABc4Qk5TtyaYEISMjQydPnlStWrUkXRtuNGvWrHwJDAAAAEDBs2nglKurq9q3b6+LFy/mVzwAAACATQyLi8OOwsjmu6pRo4aOHj2aH7EAAAAAcDCbE4TXXntNw4cP17fffqszZ84oPj7e6gAAAADgvGyepNypUydJ0n333SfL33atMwxDFotFGRkZ9osOAAAAuJlCOtTHUWxOENatW5cfcQAAAAC4BdicILRs2TI/4gAAAADyxLCwzKk95ake8/PPP+vRRx9V06ZNderUKUnSJ598oo0bN9o1OAAAAAAFy+YEYenSperQoYOKFi2qbdu2KS0tTZJ0+fJlTZw40e4BAgAAACg4eVrFaNasWfroo4/k5uZmtjdr1kzbtm2za3AAAADAzbAPgn3ZfFcHDhzQXXfdlaXdz89Ply5dskdMAAAAABzE5gQhODhYhw8fztK+ceNGVahQwS5BAQAAALlmsTjuKIRsThAGDRqk5557Tr///rssFotOnz6thQsXavjw4XryySfzI0YAAAAABcTmZU5HjhypzMxMtW3bVsnJybrrrrvk4eGh4cOH65lnnsmPGAEAAIAcFda5AI5ic4JgsVj08ssv64UXXtDhw4eVmJioiIgI+fj45Ed8AAAAAAqQzenW//73PyUnJ8vd3V0RERFq2LAhyQEAAABQSNicIAwdOlSBgYF6+OGHtXLlSmVkZORHXAAAAECuGLI47MiLmTNnKiwsTJ6enmrUqJE2b96cq/M+//xzWSwWde3aNU+vm1s2Jwhnzpwxg3vooYdUunRpPfXUU/r111/zIz4AAACg0Fi0aJEiIyM1ZswYbdu2TbVr11aHDh107ty5G5537NgxDR8+XC1atMj3GG1OEIoUKaJ7771XCxcu1Llz5zRlyhQdO3ZMrVu3VsWKFfMjRgAAACBHzrRR2uTJkzVo0CD1799fERERmjVrlry8vPTxxx/neE5GRoYeeeQRjRs3rkC2FfhXU769vLzUoUMH3XPPPbrjjjt07NgxO4UFAAAA3PrS0tIUHx9vdaSlpWXbNz09XVu3blW7du3MNhcXF7Vr106bNm3K8TXGjx+vwMBADRw40O7xZydPCUJycrIWLlyoTp06KTQ0VFOnTlW3bt20d+9ee8cHAAAA3LImTZokPz8/q2PSpEnZ9o2NjVVGRoaCgoKs2oOCghQTE5PtORs3btScOXP00Ucf2T32nNi8zGmvXr307bffysvLSw899JBeeeUVNWnSJD9iAwAAAG7OgTsajxo1SpGRkVZtHh4edrl2QkKCHnvsMX300UcqWbKkXa6ZGzYnCK6urlq8eLE6dOggV1dXq+f27NmjGjVq2C04AAAA4Fbm4eGR64SgZMmScnV11dmzZ63az549q+Dg4Cz9jxw5omPHjqlLly5mW2ZmpqRr84IPHDiQL3OAbU4QFi5caPU4ISFBn332mWbPnq2tW7ey7CkAAAAKlPHvptUWGHd3d9WvX19r1qwxlyrNzMzUmjVr9PTTT2fpX7VqVe3evduqbfTo0UpISNC0adNUtmzZfInT5gThLxs2bNCcOXO0dOlShYSEqHv37po5c6Y9YwMAAAAKlcjISPXt21cNGjRQw4YNNXXqVCUlJal///6SpD59+ig0NFSTJk2Sp6dnltE5/v7+kpSvo3ZsShBiYmI0b948zZkzR/Hx8XrooYeUlpam5cuXKyIiIr9iBAAAAHJkOHAOgq169uyp8+fP69VXX1VMTIzq1Kmj77//3py4HB0dLRcXx1ZELIZhGLnp2KVLF23YsEGdO3fWI488oo4dO8rV1VVubm7auXPnv04Qkue8+q/OB3Jr3ZBFjg4Bt4mLa/c7OgTcJq5cdZ5fjuDc+rd2dATZO7tvq8NeO6hafYe9dn7JdQXhu+++07PPPqsnn3xSd9xxR37GBAAAAMBBcl2/2LhxoxISElS/fn01atRIM2bMUGxsbH7GBgAAANyUM+2k7AxyfVeNGzfWRx99pDNnzmjw4MH6/PPPFRISoszMTK1evVoJCQn5GScAAACAAmBz2uPt7a0BAwZo48aN2r17t4YNG6Y33nhDgYGBuu+++/IjRgAAACBHhiwOOwqjf1UXqVKlit566y2dPHlSn332mb1iAgAAAOAgdhk45erqqq5du+rrr7+2x+UAAAAAOEieN0oDAAAAbgWFdbKwo/BuAgAAADBRQQAAAIBTc6adlJ0BFQQAAAAAJioIAAAAcGqFdblRR6GCAAAAAMBEggAAAADAxBAjAAAAODWWObUv3k0AAAAAJioIAAAAcGpMUrYvKggAAAAATCQIAAAAAEwMMQIAAIBTY5KyffFuAgAAADBRQQAAAIBTY5KyfVFBAAAAAGCiggAAAACnxhwE++LdBAAAAGAiQQAAAABgYogRAAAAnBqTlO2LCgIAAAAA0y1TQWi/vK2jQ8BtYsjacY4OAbeJ4m2qOjoE3CZa/Dbd0SHgttHB0QFky7BQQbAnKggAAAAATCQIAAAAAEy3zBAjAAAAIC8MgyFG9kQFAQAAAICJCgIAAACcmsF33nbFuwkAAADARAUBAAAATo2N0uyLCgIAAAAAEwkCAAAAABNDjAAAAODUGGJkX1QQAAAAAJioIAAAAMCpUUGwLyoIAAAAAEwkCAAAAABMDDECAACAU2OIkX1RQQAAAABgooIAAAAAp2YYVBDsiQoCAAAAABMJAgAAAAATQ4wAAADg1JikbF9UEAAAAACYqCAAAADAqVFBsC8qCAAAAABMVBAAAADg1Kgg2BcVBAAAAAAmEgQAAAAAJoYYAQAAwKmxk7J9UUEAAAAAYKKCAAAAAKeWySRlu6KCAAAAAMBEggAAAADAxBAjAAAAODX2QbAvKggAAAAATFQQAAAA4NRY5tS+qCAAAAAAMFFBAAAAgFNjDoJ9UUEAAAAAYCJBAAAAAGBiiBEAAACcGpOU7YsKAgAAAAATFQQAAAA4NSYp2xcVBAAAAAAmEgQAAAAAJoYYAQA
[... base64 image/png figure data from the EDA cell's display_data output omitted ...]", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "T-test for Highest Ratio: T-Statistic = -57.59965843801415, P-Value = 0.0\n", + "T-test for Average Others: T-Statistic = -21.080776226637518, P-Value = 1.2478046488137352e-93\n", + "T-test for T-Statistic: T-Statistic = nan, P-Value = nan\n", + "T-test for P-Value: T-Statistic = nan, P-Value = nan\n" + ] + }, + { + "ename": "ValueError", + "evalue": "Input X contains NaN.\nRandomForestClassifier does not accept missing values encoded as NaN natively. For supervised learning, you might want to consider sklearn.ensemble.HistGradientBoostingClassifier and Regressor which accept missing values encoded as NaNs natively. Alternatively, it is possible to preprocess the data, for instance by using an imputer transformer in a pipeline or drop samples with missing values. See https://scikit-learn.org/stable/modules/impute.html You can find a list of all estimators that handle NaN values at the following page: https://scikit-learn.org/stable/modules/impute.html#estimators-that-handle-nan-values", + "output_type": "error", + "traceback": [ + "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[1;31mValueError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[1;32mIn[46], line 91\u001b[0m\n\u001b[0;32m 89\u001b[0m \u001b[38;5;66;03m# Train a RandomForestClassifier\u001b[39;00m\n\u001b[0;32m 90\u001b[0m clf \u001b[38;5;241m=\u001b[39m RandomForestClassifier(random_state\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m42\u001b[39m)\n\u001b[1;32m---> 91\u001b[0m \u001b[43mclf\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfit\u001b[49m\u001b[43m(\u001b[49m\u001b[43mX_train\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43my_train\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 93\u001b[0m \u001b[38;5;66;03m# Make predictions\u001b[39;00m\n\u001b[0;32m 94\u001b[0m y_pred \u001b[38;5;241m=\u001b[39m clf\u001b[38;5;241m.\u001b[39mpredict(X_test)\n", + "File \u001b[1;32m~\\.conda\\envs\\py310\\lib\\site-packages\\sklearn\\base.py:1152\u001b[0m, in \u001b[0;36m_fit_context..decorator..wrapper\u001b[1;34m(estimator, *args, **kwargs)\u001b[0m\n\u001b[0;32m 1145\u001b[0m estimator\u001b[38;5;241m.\u001b[39m_validate_params()\n\u001b[0;32m 1147\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m config_context(\n\u001b[0;32m 1148\u001b[0m skip_parameter_validation\u001b[38;5;241m=\u001b[39m(\n\u001b[0;32m 1149\u001b[0m prefer_skip_nested_validation \u001b[38;5;129;01mor\u001b[39;00m global_skip_validation\n\u001b[0;32m 1150\u001b[0m )\n\u001b[0;32m 1151\u001b[0m ):\n\u001b[1;32m-> 1152\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m fit_method(estimator, \u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n", + "File \u001b[1;32m~\\.conda\\envs\\py310\\lib\\site-packages\\sklearn\\ensemble\\_forest.py:348\u001b[0m, in \u001b[0;36mBaseForest.fit\u001b[1;34m(self, X, y, sample_weight)\u001b[0m\n\u001b[0;32m 346\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m issparse(y):\n\u001b[0;32m 347\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124msparse multilabel-indicator for y is not supported.\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m--> 348\u001b[0m X, y \u001b[38;5;241m=\u001b[39m 
\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_validate_data\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m 349\u001b[0m \u001b[43m \u001b[49m\u001b[43mX\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43my\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmulti_output\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43maccept_sparse\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcsc\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdtype\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mDTYPE\u001b[49m\n\u001b[0;32m 350\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 351\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m sample_weight \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m 352\u001b[0m sample_weight \u001b[38;5;241m=\u001b[39m _check_sample_weight(sample_weight, X)\n", + "File \u001b[1;32m~\\.conda\\envs\\py310\\lib\\site-packages\\sklearn\\base.py:622\u001b[0m, in \u001b[0;36mBaseEstimator._validate_data\u001b[1;34m(self, X, y, reset, validate_separately, cast_to_ndarray, **check_params)\u001b[0m\n\u001b[0;32m 620\u001b[0m y \u001b[38;5;241m=\u001b[39m check_array(y, input_name\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124my\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mcheck_y_params)\n\u001b[0;32m 621\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m--> 622\u001b[0m X, y \u001b[38;5;241m=\u001b[39m check_X_y(X, y, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mcheck_params)\n\u001b[0;32m 623\u001b[0m out \u001b[38;5;241m=\u001b[39m X, y\n\u001b[0;32m 625\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m no_val_X \u001b[38;5;129;01mand\u001b[39;00m check_params\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mensure_2d\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;28;01mTrue\u001b[39;00m):\n", + "File \u001b[1;32m~\\.conda\\envs\\py310\\lib\\site-packages\\sklearn\\utils\\validation.py:1146\u001b[0m, in \u001b[0;36mcheck_X_y\u001b[1;34m(X, y, accept_sparse, accept_large_sparse, dtype, order, copy, force_all_finite, ensure_2d, allow_nd, multi_output, ensure_min_samples, ensure_min_features, y_numeric, estimator)\u001b[0m\n\u001b[0;32m 1141\u001b[0m estimator_name \u001b[38;5;241m=\u001b[39m _check_estimator_name(estimator)\n\u001b[0;32m 1142\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[0;32m 1143\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mestimator_name\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m requires y to be passed, but the target y is None\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m 1144\u001b[0m )\n\u001b[1;32m-> 1146\u001b[0m X \u001b[38;5;241m=\u001b[39m \u001b[43mcheck_array\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m 1147\u001b[0m \u001b[43m \u001b[49m\u001b[43mX\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1148\u001b[0m \u001b[43m \u001b[49m\u001b[43maccept_sparse\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43maccept_sparse\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1149\u001b[0m \u001b[43m 
\u001b[49m\u001b[43maccept_large_sparse\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43maccept_large_sparse\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1150\u001b[0m \u001b[43m \u001b[49m\u001b[43mdtype\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdtype\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1151\u001b[0m \u001b[43m \u001b[49m\u001b[43morder\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43morder\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1152\u001b[0m \u001b[43m \u001b[49m\u001b[43mcopy\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcopy\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1153\u001b[0m \u001b[43m \u001b[49m\u001b[43mforce_all_finite\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mforce_all_finite\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1154\u001b[0m \u001b[43m \u001b[49m\u001b[43mensure_2d\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mensure_2d\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1155\u001b[0m \u001b[43m \u001b[49m\u001b[43mallow_nd\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mallow_nd\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1156\u001b[0m \u001b[43m \u001b[49m\u001b[43mensure_min_samples\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mensure_min_samples\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1157\u001b[0m \u001b[43m \u001b[49m\u001b[43mensure_min_features\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mensure_min_features\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1158\u001b[0m \u001b[43m \u001b[49m\u001b[43mestimator\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mestimator\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1159\u001b[0m \u001b[43m \u001b[49m\u001b[43minput_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mX\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1160\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 1162\u001b[0m y \u001b[38;5;241m=\u001b[39m _check_y(y, multi_output\u001b[38;5;241m=\u001b[39mmulti_output, y_numeric\u001b[38;5;241m=\u001b[39my_numeric, estimator\u001b[38;5;241m=\u001b[39mestimator)\n\u001b[0;32m 1164\u001b[0m check_consistent_length(X, y)\n", + "File \u001b[1;32m~\\.conda\\envs\\py310\\lib\\site-packages\\sklearn\\utils\\validation.py:957\u001b[0m, in \u001b[0;36mcheck_array\u001b[1;34m(array, accept_sparse, accept_large_sparse, dtype, order, copy, force_all_finite, ensure_2d, allow_nd, ensure_min_samples, ensure_min_features, estimator, input_name)\u001b[0m\n\u001b[0;32m 951\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[0;32m 952\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mFound array with dim \u001b[39m\u001b[38;5;132;01m%d\u001b[39;00m\u001b[38;5;124m. 
\u001b[39m\u001b[38;5;132;01m%s\u001b[39;00m\u001b[38;5;124m expected <= 2.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m 953\u001b[0m \u001b[38;5;241m%\u001b[39m (array\u001b[38;5;241m.\u001b[39mndim, estimator_name)\n\u001b[0;32m 954\u001b[0m )\n\u001b[0;32m 956\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m force_all_finite:\n\u001b[1;32m--> 957\u001b[0m \u001b[43m_assert_all_finite\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m 958\u001b[0m \u001b[43m \u001b[49m\u001b[43marray\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 959\u001b[0m \u001b[43m \u001b[49m\u001b[43minput_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43minput_name\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 960\u001b[0m \u001b[43m \u001b[49m\u001b[43mestimator_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mestimator_name\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 961\u001b[0m \u001b[43m \u001b[49m\u001b[43mallow_nan\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mforce_all_finite\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m==\u001b[39;49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mallow-nan\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[0;32m 962\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 964\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m ensure_min_samples \u001b[38;5;241m>\u001b[39m \u001b[38;5;241m0\u001b[39m:\n\u001b[0;32m 965\u001b[0m n_samples \u001b[38;5;241m=\u001b[39m _num_samples(array)\n", + "File \u001b[1;32m~\\.conda\\envs\\py310\\lib\\site-packages\\sklearn\\utils\\validation.py:122\u001b[0m, in \u001b[0;36m_assert_all_finite\u001b[1;34m(X, allow_nan, msg_dtype, estimator_name, input_name)\u001b[0m\n\u001b[0;32m 119\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m first_pass_isfinite:\n\u001b[0;32m 120\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m\n\u001b[1;32m--> 122\u001b[0m \u001b[43m_assert_all_finite_element_wise\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m 123\u001b[0m \u001b[43m \u001b[49m\u001b[43mX\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 124\u001b[0m \u001b[43m \u001b[49m\u001b[43mxp\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mxp\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 125\u001b[0m \u001b[43m \u001b[49m\u001b[43mallow_nan\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mallow_nan\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 126\u001b[0m \u001b[43m \u001b[49m\u001b[43mmsg_dtype\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmsg_dtype\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 127\u001b[0m \u001b[43m \u001b[49m\u001b[43mestimator_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mestimator_name\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 128\u001b[0m \u001b[43m \u001b[49m\u001b[43minput_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43minput_name\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 129\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[1;32m~\\.conda\\envs\\py310\\lib\\site-packages\\sklearn\\utils\\validation.py:171\u001b[0m, in \u001b[0;36m_assert_all_finite_element_wise\u001b[1;34m(X, xp, allow_nan, msg_dtype, estimator_name, input_name)\u001b[0m\n\u001b[0;32m 154\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m estimator_name \u001b[38;5;129;01mand\u001b[39;00m input_name \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mX\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;129;01mand\u001b[39;00m has_nan_error:\n\u001b[0;32m 155\u001b[0m \u001b[38;5;66;03m# Improve the error message on how to handle missing 
values in\u001b[39;00m\n\u001b[0;32m 156\u001b[0m \u001b[38;5;66;03m# scikit-learn.\u001b[39;00m\n\u001b[0;32m 157\u001b[0m msg_err \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m (\n\u001b[0;32m 158\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;132;01m{\u001b[39;00mestimator_name\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m does not accept missing values\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m 159\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m encoded as NaN natively. For supervised learning, you might want\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 169\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m#estimators-that-handle-nan-values\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m 170\u001b[0m )\n\u001b[1;32m--> 171\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(msg_err)\n", + "\u001b[1;31mValueError\u001b[0m: Input X contains NaN.\nRandomForestClassifier does not accept missing values encoded as NaN natively. For supervised learning, you might want to consider sklearn.ensemble.HistGradientBoostingClassifier and Regressor which accept missing values encoded as NaNs natively. Alternatively, it is possible to preprocess the data, for instance by using an imputer transformer in a pipeline or drop samples with missing values. See https://scikit-learn.org/stable/modules/impute.html You can find a list of all estimators that handle NaN values at the following page: https://scikit-learn.org/stable/modules/impute.html#estimators-that-handle-nan-values" + ] + } + ], + "source": [ + "import pandas as pd\n", + "import seaborn as sns\n", + "import matplotlib.pyplot as plt\n", + "from scipy.stats import ttest_ind\n", + "from sklearn.model_selection import train_test_split\n", + "from sklearn.ensemble import RandomForestClassifier\n", + "from sklearn.metrics import classification_report, confusion_matrix\n", + "\n", + "# Assuming list_of_significance and list_of_significance_watermarked are already defined\n", + "# Create DataFrames from the lists\n", + "df_significance = pd.DataFrame(list_of_significance, columns=['Highest Ratio', 'Average Others', 'T-Statistic', 'P-Value'])\n", + "df_significance_watermarked = pd.DataFrame(list_of_significance_watermarked, columns=['Highest Ratio', 'Average Others', 'T-Statistic', 'P-Value'])\n", + "\n", + "# Add a label column to distinguish between the two sets\n", + "df_significance['Label'] = 'Original'\n", + "df_significance_watermarked['Label'] = 'Watermarked'\n", + "\n", + "# Combine the DataFrames\n", + "combined_df = pd.concat([df_significance, df_significance_watermarked], ignore_index=True)\n", + "\n", + "# Perform EDA\n", + "def perform_eda(df):\n", + " # Display the first few rows of the DataFrame\n", + " print(\"First few rows of the DataFrame:\")\n", + " print(df.head())\n", + "\n", + " # Display statistical summary\n", + " print(\"\\nStatistical Summary:\")\n", + " print(df.describe())\n", + "\n", + " # Check for missing values\n", + " print(\"\\nMissing Values:\")\n", + " print(df.isnull().sum())\n", + "\n", + " # Visualize the distributions of the features\n", + " plt.figure(figsize=(12, 8))\n", + " sns.histplot(data=df, x='Highest Ratio', hue='Label', element='step', kde=True)\n", + " plt.title('Distribution of Highest Ratio')\n", + " plt.show()\n", + "\n", + " plt.figure(figsize=(12, 8))\n", + " sns.histplot(data=df, x='Average Others', hue='Label', element='step', 
kde=True)\n", + " plt.title('Distribution of Average Others')\n", + " plt.show()\n", + "\n", + " plt.figure(figsize=(12, 8))\n", + " sns.histplot(data=df, x='T-Statistic', hue='Label', element='step', kde=True)\n", + " plt.title('Distribution of T-Statistic')\n", + " plt.show()\n", + "\n", + " plt.figure(figsize=(12, 8))\n", + " sns.histplot(data=df, x='P-Value', hue='Label', element='step', kde=True)\n", + " plt.title('Distribution of P-Value')\n", + " plt.show()\n", + "\n", + " # Pairplot to see relationships\n", + " sns.pairplot(df, hue='Label')\n", + " plt.show()\n", + "\n", + " # Correlation matrix\n", + " plt.figure(figsize=(10, 8))\n", + " sns.heatmap(df.drop(columns=['Label']).corr(), annot=True, cmap='coolwarm')\n", + " plt.title('Correlation Matrix')\n", + " plt.show()\n", + "\n", + " # T-test to check for significant differences\n", + " original = df[df['Label'] == 'Original']\n", + " watermarked = df[df['Label'] == 'Watermarked']\n", + "\n", + " for column in ['Highest Ratio', 'Average Others', 'T-Statistic', 'P-Value']:\n", + " t_stat, p_value = ttest_ind(original[column], watermarked[column])\n", + " print(f\"T-test for {column}: T-Statistic = {t_stat}, P-Value = {p_value}\")\n", + "\n", + "# Perform EDA on the combined DataFrame\n", + "perform_eda(combined_df)\n", + "\n", + "# Check if the data is ready for machine learning classification\n", + "\n", + "# Prepare the data\n", + "X = combined_df.drop(columns=['Label'])\n", + "y = combined_df['Label']\n", + "\n", + "# Convert labels to numerical values for ML model\n", + "y = y.map({'Original': 0, 'Watermarked': 1})\n", + "\n", + "# Split the data into training and testing sets\n", + "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n", + "\n", + "# Train a RandomForestClassifier\n", + "clf = RandomForestClassifier(random_state=42)\n", + "clf.fit(X_train, y_train)\n", + "\n", + "# Make predictions\n", + "y_pred = clf.predict(X_test)\n", + "\n", + "# Evaluate the model\n", + "print(\"\\nClassification Report:\")\n", + "print(classification_report(y_test, y_pred))\n", + "\n", + "print(\"\\nConfusion Matrix:\")\n", + "print(confusion_matrix(y_test, y_pred))\n", + "\n", + "# Feature importances\n", + "feature_importances = clf.feature_importances_\n", + "\n", + "# Create a DataFrame for feature importances\n", + "feature_importances_df = pd.DataFrame({\n", + " 'Feature': X.columns,\n", + " 'Importance': feature_importances\n", + "}).sort_values(by='Importance', ascending=False)\n", + "\n", + "# Plot feature importances\n", + "plt.figure(figsize=(12, 8))\n", + "sns.barplot(x='Importance', y='Feature', data=feature_importances_df, palette='viridis')\n", + "plt.title('Feature Importances')\n", + "plt.show()\n", + "\n", + "# Heatmap for feature importances\n", + "plt.figure(figsize=(10, 8))\n", + "sns.heatmap(feature_importances_df.set_index('Feature').T, annot=True, cmap='viridis')\n", + "plt.title('Heatmap of Feature Importances')\n", + "plt.show()\n" + ] + }, + { + "cell_type": "code", + "execution_count": 45, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "DN8bw7WqUXX5", + "outputId": "10c5dd1f-0e8d-43a7-82e0-02dea500dbe8" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Logistic Regression Classification Report:\n", + " precision recall f1-score support\n", + "\n", + " 0 0.83 0.87 0.85 415\n", + " 1 0.85 0.81 0.83 385\n", + "\n", + " accuracy 0.84 800\n", + " macro avg 0.84 0.84 0.84 800\n", + "weighted avg 
0.84 0.84 0.84 800\n", + "\n", + "\n", + "Logistic Regression Confusion Matrix:\n", + "[[360 55]\n", + " [ 73 312]]\n", + "\n", + "Decision Tree Classification Report:\n", + " precision recall f1-score support\n", + "\n", + " 0 0.92 0.92 0.92 415\n", + " 1 0.91 0.91 0.91 385\n", + "\n", + " accuracy 0.91 800\n", + " macro avg 0.91 0.91 0.91 800\n", + "weighted avg 0.91 0.91 0.91 800\n", + "\n", + "\n", + "Decision Tree Confusion Matrix:\n", + "[[380 35]\n", + " [ 35 350]]\n", + "\n", + "Random Forest Classification Report:\n", + " precision recall f1-score support\n", + "\n", + " 0 0.91 0.94 0.93 415\n", + " 1 0.94 0.90 0.92 385\n", + "\n", + " accuracy 0.92 800\n", + " macro avg 0.92 0.92 0.92 800\n", + "weighted avg 0.92 0.92 0.92 800\n", + "\n", + "\n", + "Random Forest Confusion Matrix:\n", + "[[391 24]\n", + " [ 39 346]]\n", + "\n", + "Support Vector Machine Classification Report:\n", + " precision recall f1-score support\n", + "\n", + " 0 0.71 0.79 0.75 415\n", + " 1 0.74 0.66 0.70 385\n", + "\n", + " accuracy 0.73 800\n", + " macro avg 0.73 0.72 0.72 800\n", + "weighted avg 0.73 0.72 0.72 800\n", + "\n", + "\n", + "Support Vector Machine Confusion Matrix:\n", + "[[327 88]\n", + " [132 253]]\n", + "\n", + "Gradient Boosting Classification Report:\n", + " precision recall f1-score support\n", + "\n", + " 0 0.93 0.94 0.94 415\n", + " 1 0.94 0.92 0.93 385\n", + "\n", + " accuracy 0.94 800\n", + " macro avg 0.94 0.93 0.93 800\n", + "weighted avg 0.94 0.94 0.93 800\n", + "\n", + "\n", + "Gradient Boosting Confusion Matrix:\n", + "[[392 23]\n", + " [ 29 356]]\n", + "\n", + "AdaBoost Classification Report:\n", + " precision recall f1-score support\n", + "\n", + " 0 0.90 0.91 0.90 415\n", + " 1 0.90 0.89 0.89 385\n", + "\n", + " accuracy 0.90 800\n", + " macro avg 0.90 0.90 0.90 800\n", + "weighted avg 0.90 0.90 0.90 800\n", + "\n", + "\n", + "AdaBoost Confusion Matrix:\n", + "[[376 39]\n", + " [ 44 341]]\n", + "\n", + "Naive Bayes Classification Report:\n", + " precision recall f1-score support\n", + "\n", + " 0 0.78 0.81 0.79 415\n", + " 1 0.78 0.76 0.77 385\n", + "\n", + " accuracy 0.78 800\n", + " macro avg 0.78 0.78 0.78 800\n", + "weighted avg 0.78 0.78 0.78 800\n", + "\n", + "\n", + "Naive Bayes Confusion Matrix:\n", + "[[335 80]\n", + " [ 94 291]]\n", + "\n", + "K-Nearest Neighbors Classification Report:\n", + " precision recall f1-score support\n", + "\n", + " 0 0.82 0.87 0.84 415\n", + " 1 0.85 0.79 0.82 385\n", + "\n", + " accuracy 0.83 800\n", + " macro avg 0.83 0.83 0.83 800\n", + "weighted avg 0.83 0.83 0.83 800\n", + "\n", + "\n", + "K-Nearest Neighbors Confusion Matrix:\n", + "[[361 54]\n", + " [ 81 304]]\n" + ] + } + ], + "source": [ + "import pandas as pd\n", + "import seaborn as sns\n", + "import matplotlib.pyplot as plt\n", + "from sklearn.model_selection import train_test_split\n", + "from sklearn.linear_model import LogisticRegression\n", + "from sklearn.tree import DecisionTreeClassifier\n", + "from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier\n", + "from sklearn.svm import SVC\n", + "from sklearn.naive_bayes import GaussianNB\n", + "from sklearn.neighbors import KNeighborsClassifier\n", + "from sklearn.metrics import classification_report, confusion_matrix\n", + "\n", + "# Assuming list_of_significance and list_of_significance_watermarked are already defined\n", + "# Create DataFrames from the lists\n", + "df_significance = pd.DataFrame(list_of_significance, columns=['Highest Ratio', 'Average Others', 'T-Statistic', 
'P-Value'])\n", + "df_significance_watermarked = pd.DataFrame(list_of_significance_watermarked, columns=['Highest Ratio', 'Average Others', 'T-Statistic', 'P-Value'])\n", + "\n", + "# Add a label column to distinguish between the two sets\n", + "df_significance['Label'] = 'Original'\n", + "df_significance_watermarked['Label'] = 'Watermarked'\n", + "\n", + "# Combine the DataFrames\n", + "combined_df = pd.concat([df_significance, df_significance_watermarked], ignore_index=True)\n", + "combined_df = combined_df.dropna()\n", + "\n", + "# Prepare the data\n", + "X = combined_df.drop(columns=['Label'])\n", + "y = combined_df['Label']\n", + "\n", + "# Convert labels to numerical values for ML model\n", + "y = y.map({'Original': 0, 'Watermarked': 1})\n", + "\n", + "# Split the data into training and testing sets\n", + "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n", + "\n", + "# Initialize models\n", + "models = {\n", + " 'Logistic Regression': LogisticRegression(random_state=42, max_iter=1000),\n", + " 'Decision Tree': DecisionTreeClassifier(random_state=42),\n", + " 'Random Forest': RandomForestClassifier(random_state=42),\n", + " 'Support Vector Machine': SVC(random_state=42),\n", + " 'Gradient Boosting': GradientBoostingClassifier(random_state=42),\n", + " 'AdaBoost': AdaBoostClassifier(random_state=42),\n", + " 'Naive Bayes': GaussianNB(),\n", + " 'K-Nearest Neighbors': KNeighborsClassifier()\n", + "}\n", + "\n", + "# Train and evaluate models\n", + "for model_name, model in models.items():\n", + " model.fit(X_train, y_train)\n", + " y_pred = model.predict(X_test)\n", + " print(f\"\\n{model_name} Classification Report:\")\n", + " print(classification_report(y_test, y_pred))\n", + " print(f\"\\n{model_name} Confusion Matrix:\")\n", + " print(confusion_matrix(y_test, y_pred))\n", + "\n", + " # Feature importances (only for models that provide it)\n", + " if hasattr(model, 'feature_importances_'):\n", + " feature_importances = model.feature_importances_\n", + " feature_importances_df = pd.DataFrame({\n", + " 'Feature': X.columns,\n", + " 'Importance': feature_importances\n", + " }).sort_values(by='Importance', ascending=False)\n", + "\n", + " # Plot feature importances\n", + " # plt.figure(figsize=(12, 8))\n", + " # sns.barplot(x='Importance', y='Feature', data=feature_importances_df, palette='viridis')\n", + " # plt.title(f'{model_name} Feature Importances')\n", + " plt.show()\n" + ] + }, + { + "cell_type": "code", + "execution_count": 42, + "metadata": { + "id": "sJxGEZAJzPmz" + }, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 33, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "hOnM5F1LXklH", + "outputId": "97eb0191-4324-40a5-e7f6-4479ad4a3443" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Number of test cases created: 2000\n" + ] + } + ], + "source": [ + "import os\n", + "import random\n", + "\n", + "def extract_test_cases(folder_path, num_cases=2000, words_per_case=300):\n", + " test_cases = []\n", + " book_files = [f for f in os.listdir(folder_path) if os.path.isfile(os.path.join(folder_path, f))]\n", + "\n", + " # Calculate the number of test cases to extract from each book\n", + " cases_per_book = num_cases // len(book_files)\n", + " extra_cases = num_cases % len(book_files)\n", + "\n", + " for book_file in book_files:\n", + " with open(os.path.join(folder_path, book_file), 'r', encoding='utf-8') as file:\n", + " text = 
file.read()\n", + " words = text.split()\n", + " num_words = len(words)\n", + "\n", + " # Ensure enough words are available to extract the cases\n", + " if num_words < words_per_case:\n", + " continue\n", + "\n", + " # Determine the number of cases to extract from this book\n", + " num_cases_from_book = cases_per_book\n", + " if extra_cases > 0:\n", + " num_cases_from_book += 1\n", + " extra_cases -= 1\n", + "\n", + " for _ in range(num_cases_from_book):\n", + " start_index = random.randint(0, num_words - words_per_case)\n", + " case = ' '.join(words[start_index:start_index + words_per_case])\n", + " test_cases.append(case)\n", + "\n", + " if len(test_cases) == num_cases:\n", + " return test_cases\n", + "\n", + " return test_cases\n", + "\n", + "# Usage example\n", + "folder_path = 'books'\n", + "test_cases = extract_test_cases(folder_path)\n", + "\n", + "# Output the number of test cases created\n", + "print(f\"Number of test cases created: {len(test_cases)}\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 38, + "metadata": { + "id": "9NEvIc_HY43Z" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "___________________________________________________________________________________________________________________________\n", + "Doing 1\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.48333333333333334\n", + "Average of Other Ratios: 0.22814207650273222\n", + "T-Statistic: -21.334991021776784\n", + "P-Value: 0.00022530414214046572\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23333333333333334\n", + "Average of Other Ratios: 0.1822033898305085\n", + "T-Statistic: -3.53275826407369\n", + "P-Value: 0.038562976693981454\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 2\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4098360655737705\n", + "Average of Other Ratios: 0.23333333333333334\n", + "T-Statistic: -4.992251154606664\n", + "P-Value: 0.015458009685690827\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2033898305084746\n", + "Average of Other Ratios: 0.1391949152542373\n", + "T-Statistic: -3.4405910948750495\n", + "P-Value: 0.04121820653114378\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 2\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 3\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.65\n", + "Average of Other Ratios: 0.34815573770491803\n", + "T-Statistic: -6.977885499593617\n", + "P-Value: 0.0060406875581721555\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3389830508474576\n", + "Average of Other Ratios: 0.27033898305084747\n", + "T-Statistic: -2.228607614649941\n", + "P-Value: 0.11214158967770235\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 3\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 4\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6333333333333333\n", + "Average of Other Ratios: 0.2573087431693989\n", + "T-Statistic: -17.794177111160675\n", + "P-Value: 0.0003870090924213516\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2542372881355932\n", + "Average of Other Ratios: 0.16836158192090395\n", + "T-Statistic: -2.451612903225806\n", + "P-Value: 0.2465587655124727\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 4\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 5\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4\n", + "Average of Other Ratios: 0.3151639344262295\n", + "T-Statistic: -1.713189822924711\n", + "P-Value: 0.18519433572899746\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.288135593220339\n", + "Average of Other Ratios: 0.21087570621468926\n", + "T-Statistic: -5.467540160267347\n", + "P-Value: 0.012025943288987453\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 5\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 6\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45901639344262296\n", + "Average of Other Ratios: 0.20416666666666666\n", + "T-Statistic: -8.101361023294555\n", + "P-Value: 0.003930735409185079\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.22033898305084745\n", + "Average of Other Ratios: 0.1307909604519774\n", + "T-Statistic: -11.145126479863883\n", + "P-Value: 0.0015479966208348658\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 6\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 7\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4166666666666667\n", + "Average of Other Ratios: 0.27424863387978143\n", + "T-Statistic: -2.647512144273123\n", + "P-Value: 0.07715790266759627\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.22033898305084745\n", + "Average of Other Ratios: 0.1603813559322034\n", + "T-Statistic: -4.047402698396378\n", + "P-Value: 0.027156785257683596\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 7\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 8\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.28599726775956286\n", + "T-Statistic: -9.817142706536112\n", + "P-Value: 0.002246603044354501\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3\n", + "Average of Other Ratios: 0.22033898305084745\n", + "T-Statistic: -11.51260179108094\n", + "P-Value: 0.0014069485474090153\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 8\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 9\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5245901639344263\n", + "Average of Other Ratios: 0.2375\n", + "T-Statistic: -5.570367388129549\n", + "P-Value: 0.011418116056075428\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23728813559322035\n", + "Average of Other Ratios: 0.18149717514124292\n", + "T-Statistic: -3.7964977175244834\n", + "P-Value: 0.03208088709594881\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 9\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 10\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.2775273224043716\n", + "T-Statistic: -4.727075685541707\n", + "P-Value: 0.01793917650756737\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23728813559322035\n", + "Average of Other Ratios: 0.16871468926553673\n", + "T-Statistic: -3.7922455055393622\n", + "P-Value: 0.03217383630567124\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 10\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 11\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4098360655737705\n", + "Average of Other Ratios: 0.25\n", + "T-Statistic: -3.714254520543179\n", + "P-Value: 0.033941551397426564\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23333333333333334\n", + "Average of Other Ratios: 0.1398305084745763\n", + "T-Statistic: -4.975896705378727\n", + "P-Value: 0.015597602000975219\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 11\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 12\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6333333333333333\n", + "Average of Other Ratios: 0.24904371584699453\n", + "T-Statistic: -14.463091326070671\n", + "P-Value: 0.0007165776065197027\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23728813559322035\n", + "Average of Other Ratios: 0.20261299435028246\n", + "T-Statistic: -3.3817063110386885\n", + "P-Value: 0.04303714816975945\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 12\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 13\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5081967213114754\n", + "Average of Other Ratios: 0.275\n", + "T-Statistic: -5.83498532451519\n", + "P-Value: 0.01002850287511932\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.288135593220339\n", + "Average of Other Ratios: 0.18177966101694915\n", + "T-Statistic: -3.365869501933496\n", + "P-Value: 0.04354367094755919\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 13\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 14\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5245901639344263\n", + "Average of Other Ratios: 0.23750000000000002\n", + "T-Statistic: -15.536893060799459\n", + "P-Value: 0.0005793474370025991\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.22033898305084745\n", + "Average of Other Ratios: 0.1477401129943503\n", + "T-Statistic: -3.4073375272246085\n", + "P-Value: 0.04223311481214282\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 14\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 15\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7\n", + "Average of Other Ratios: 0.32745901639344266\n", + "T-Statistic: -6.77625507348341\n", + "P-Value: 0.006568385846444286\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.27838983050847455\n", + "T-Statistic: -2.9307183932115923\n", + "P-Value: 0.06096526759447833\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 15\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 16\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4426229508196721\n", + "Average of Other Ratios: 0.21666666666666667\n", + "T-Statistic: -4.119026835630454\n", + "P-Value: 0.025932160329463834\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.16666666666666666\n", + "Average of Other Ratios: 0.1228813559322034\n", + "T-Statistic: -10.333333333333332\n", + "P-Value: 0.001933293191806968\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 16\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 17\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6721311475409836\n", + "Average of Other Ratios: 0.23333333333333334\n", + "T-Statistic: -9.613578441019637\n", + "P-Value: 0.0023886490069146135\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3050847457627119\n", + "Average of Other Ratios: 0.21101694915254238\n", + "T-Statistic: -4.384236405710172\n", + "P-Value: 0.02197310950253267\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 17\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 18\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.38333333333333336\n", + "Average of Other Ratios: 0.2775956284153005\n", + "T-Statistic: -2.196141651943659\n", + "P-Value: 0.11558815206376069\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2033898305084746\n", + "Average of Other Ratios: 0.14745762711864407\n", + "T-Statistic: -2.9054879908745583\n", + "P-Value: 0.062224127599699926\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 18\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 19\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + },
[Notebook cell output for documents 19-39, condensed. Each document produced two statistic blocks, listed below in order; every block was preceded by the same verbatim bert-base-uncased "Some weights of the model checkpoint ... were not used when initializing BertForMaskedLM" warning, which is elided below along with the "Done N / Doing N+1" separators. Ratios are rounded to three decimals, t-statistics to two, p-values to three; the significance verdict is the notebook's own printed conclusion ("The highest ratio is (not) significantly different from the others").]
Doc 19: (1) highest 0.433, others 0.295, t = -3.91, p = 0.030, significant; (2) highest 0.237, others 0.211, t = -2.59, p = 0.081, not significant.
Doc 20: (1) highest 0.377, others 0.213, t = -4.71, p = 0.018, significant; (2) highest 0.217, others 0.144, t = -2.97, p = 0.059, not significant.
Doc 21: (1) highest 0.617, others 0.352, t = -5.47, p = 0.012, significant; (2) highest 0.407, others 0.321, t = -4.69, p = 0.018, significant.
Doc 22: (1) highest 0.344, others 0.288, t = -2.00, p = 0.139, not significant; (2) highest 0.254, others 0.139, t = -5.10, p = 0.015, significant.
Doc 23: (1) highest 0.350, others 0.228, t = -2.92, p = 0.061, not significant; (2) highest 0.233, others 0.114, t = -6.33, p = 0.008, significant.
Doc 24: (1) highest 0.433, others 0.257, t = -6.71, p = 0.007, significant; (2) highest 0.237, others 0.190, t = -2.20, p = 0.115, not significant.
Doc 25: (1) highest 0.623, others 0.208, t = -7.97, p = 0.004, significant; (2) highest 0.220, others 0.127, t = -5.29, p = 0.013, significant.
Doc 26: (1) highest 0.377, others 0.246, t = -12.51, p = 0.001, significant; (2) highest 0.300, others 0.186, t = -2.95, p = 0.060, not significant.
Doc 27: (1) highest 0.483, others 0.320, t = -7.43, p = 0.005, significant; (2) highest 0.390, others 0.266, t = -7.98, p = 0.004, significant.
Doc 28: (1) highest 0.492, others 0.200, t = -10.72, p = 0.002, significant; (2) highest 0.186, others 0.143, t = -3.02, p = 0.057, not significant.
Doc 29: (1) highest 0.450, others 0.183, t = -9.31, p = 0.003, significant; (2) highest 0.217, others 0.127, t = -8.18, p = 0.004, significant.
Doc 30: (1) highest 0.450, others 0.278, t = -4.41, p = 0.022, significant; (2) highest 0.233, others 0.182, t = -4.79, p = 0.017, significant.
Doc 31: (1) highest 0.533, others 0.220, t = -6.70, p = 0.007, significant; (2) highest 0.271, others 0.135, t = -9.59, p = 0.002, significant.
Doc 32: (1) highest 0.450, others 0.244, t = -5.64, p = 0.011, significant; (2) highest 0.203, others 0.165, t = -3.67, p = 0.035, significant.
Doc 33: (1) highest 0.400, others 0.232, t = -9.67, p = 0.002, significant; (2) highest 0.186, others 0.152, t = -1.94, p = 0.147, not significant.
Doc 34: (1) highest 0.500, others 0.215, t = -4.78, p = 0.017, significant; (2) highest 0.203, others 0.152, t = -2.70, p = 0.074, not significant.
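For readers skimming these logs: each statistic block compares the highest match ratio obtained for a document against the remaining candidates' ratios. The sketch below is not the notebook's code (the actual implementation is defined in earlier cells of this notebook); it is a minimal, hypothetical illustration of one way numbers in this shape could be produced, assuming a one-sample t-test of the other match ratios against the highest ratio via scipy.stats.ttest_1samp. The function name compare_highest_to_others and the example ratios are invented.

```python
# Hypothetical sketch, NOT the notebook's actual detection code; it only illustrates
# one plausible way to obtain "Highest Match Ratio / Average of Other Ratios /
# T-Statistic / P-Value" style output like the logs above.
from scipy import stats


def compare_highest_to_others(match_ratios, alpha=0.05):
    """Compare the best-matching candidate ratio against the remaining candidates."""
    ratios = sorted(match_ratios)
    highest, others = ratios[-1], ratios[:-1]
    # t is negative whenever the other ratios sit below the highest one. With only a
    # single "other" ratio the sample variance is undefined, so t and p come out as
    # nan, accompanied by the same kind of divide-by-zero RuntimeWarning that is
    # recorded for document 35 just below.
    t_stat, p_value = stats.ttest_1samp(others, popmean=highest)
    print(f"Highest Match Ratio: {highest}")
    print(f"Average of Other Ratios: {sum(others) / len(others)}")
    print(f"T-Statistic: {t_stat}")
    print(f"P-Value: {p_value}")
    if p_value < alpha:
        print("The highest ratio is significantly different from the others.")
    else:
        print("The highest ratio is not significantly different from the others.")


# Made-up example with five candidate ratios, one of which matches clearly better.
compare_highest_to_others([0.43, 0.29, 0.31, 0.28, 0.30])
```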
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5737704918032787\n", + "Average of Other Ratios: 0.13749999999999998\n", + "T-Statistic: -41.60551556348084\n", + "P-Value: 3.055734072793683e-05\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "C:\\Users\\rrath\\.conda\\envs\\py310\\lib\\site-packages\\scipy\\stats\\_stats_py.py:1103: RuntimeWarning: divide by zero encountered in divide\n", + " var *= np.divide(n, n-ddof) # to avoid error on division by zero\n", + "C:\\Users\\rrath\\.conda\\envs\\py310\\lib\\site-packages\\scipy\\stats\\_stats_py.py:1103: RuntimeWarning: invalid value encountered in scalar multiply\n", + " var *= np.divide(n, n-ddof) # to avoid error on division by zero\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.1016949152542373\n", + "Average of Other Ratios: 0.06666666666666667\n", + "T-Statistic: nan\n", + "P-Value: nan\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 35\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 36\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5245901639344263\n", + "Average of Other Ratios: 0.19583333333333333\n", + "T-Statistic: -11.940183637404086\n", + "P-Value: 0.001263509863921225\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.1694915254237288\n", + "Average of Other Ratios: 0.14058380414312618\n", + "T-Statistic: -2.4173228346456677\n", + "P-Value: 0.13686029083311824\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 36\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 37\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
Doing 37
Highest Match Ratio: 0.36666666666666664, Average of Other Ratios: 0.29439890710382516, T-Statistic: -2.2491233682903635, P-Value: 0.11002736160816107 -> not significantly different from the others
Highest Match Ratio: 0.3050847457627119, Average of Other Ratios: 0.1519774011299435, T-Statistic: -5.686705315838459, P-Value: 0.010777981645506028 -> significantly different from the others
Done 37
Doing 38
Highest Match Ratio: 0.55, Average of Other Ratios: 0.21598360655737703, T-Statistic: -12.468162549596142, P-Value: 0.0011119786230779946 -> significantly different from the others
Highest Match Ratio: 0.25, Average of Other Ratios: 0.17372881355932202, T-Statistic: -2.3302720008113575, P-Value: 0.10212247896202177 -> not significantly different from the others
Done 38
Doing 39
Highest Match Ratio: 0.5409836065573771, Average of Other Ratios: 0.27083333333333337, T-Statistic: -8.587746675724997, P-Value: 0.0033191706279838665 -> significantly different from the others
Highest Match Ratio: 0.2711864406779661, Average of Other Ratios: 0.21949152542372882, T-Statistic: -5.019825255742886, P-Value: 0.015226316305783671 -> significantly different from the others
Done 39
Doing 40
Highest Match Ratio: 0.45, Average of Other Ratios: 0.26584699453551913, T-Statistic: -3.724628638572125, P-Value: 0.03369936057429459 -> significantly different from the others
Highest Match Ratio: 0.2711864406779661, Average of Other Ratios: 0.1602401129943503, T-Statistic: -3.8619097169864767, P-Value: 0.030693492269553303 -> significantly different from the others
Done 40
Doing 41
Highest Match Ratio: 0.5166666666666667, Average of Other Ratios: 0.34460382513661203, T-Statistic: -3.314218356932012, P-Value: 0.0452491572848592 -> significantly different from the others
Highest Match Ratio: 0.288135593220339, Average of Other Ratios: 0.23596986817325802, T-Statistic: -39.57142857142854, P-Value: 0.0006380001300463167 -> significantly different from the others
Done 41
Doing 42
Highest Match Ratio: 0.5573770491803278, Average of Other Ratios: 0.20416666666666666, T-Statistic: -12.828316972577708, P-Value: 0.001022214116280989 -> significantly different from the others
Highest Match Ratio: 0.1694915254237288, Average of Other Ratios: 0.12238700564971752, T-Statistic: -4.38578568651365, P-Value: 0.02195236472320489 -> significantly different from the others
Done 42
Doing 43
Highest Match Ratio: 0.4166666666666667, Average of Other Ratios: 0.27759562841530055, T-Statistic: -4.08198313178942, P-Value: 0.026556436001686043 -> significantly different from the others
Highest Match Ratio: 0.2542372881355932, Average of Other Ratios: 0.1519774011299435, T-Statistic: -5.530747598736873, P-Value: 0.011647446932010377 -> significantly different from the others
Done 43
Doing 44
Highest Match Ratio: 0.6229508196721312, Average of Other Ratios: 0.21250000000000002, T-Statistic: -32.83606557377048, P-Value: 6.20825070326001e-05 -> significantly different from the others
Highest Match Ratio: 0.21666666666666667, Average of Other Ratios: 0.15254237288135594, T-Statistic: -5.350441310978211, P-Value: 0.012770724119522098 -> significantly different from the others
Done 44
Doing 45
Highest Match Ratio: 0.43333333333333335, Average of Other Ratios: 0.24849726775956282, T-Statistic: -3.748296362699404, P-Value: 0.03315504943587395 -> significantly different from the others
Highest Match Ratio: 0.22033898305084745, Average of Other Ratios: 0.14357344632768362, T-Statistic: -5.128214329323895, P-Value: 0.01435823217533278 -> significantly different from the others
Done 45
Doing 46
Highest Match Ratio: 0.5245901639344263, Average of Other Ratios: 0.2625, T-Statistic: -10.53248144497122, P-Value: 0.0018279382457190715 -> significantly different from the others
Highest Match Ratio: 0.23728813559322035, Average of Other Ratios: 0.1646186440677966, T-Statistic: -4.953014798968853, P-Value: 0.015795677695098962 -> significantly different from the others
Done 46
Doing 47
Highest Match Ratio: 0.4426229508196721, Average of Other Ratios: 0.29166666666666663, T-Statistic: -2.5878220140515227, P-Value: 0.0812271381568774 -> not significantly different from the others
Highest Match Ratio: 0.3050847457627119, Average of Other Ratios: 0.17323446327683617, T-Statistic: -5.3345252289586895, P-Value: 0.012876563862984138 -> significantly different from the others
Done 47
Doing 48
Highest Match Ratio: 0.5166666666666667, Average of Other Ratios: 0.21598360655737703, T-Statistic: -8.750839688124422, P-Value: 0.003142480189931068 -> significantly different from the others
Highest Match Ratio: 0.23728813559322035, Average of Other Ratios: 0.1643361581920904, T-Statistic: -3.2248357074853122, P-Value: 0.04840566051832893 -> significantly different from the others
Done 48
Doing 49
Highest Match Ratio: 0.6333333333333333, Average of Other Ratios: 0.232103825136612, T-Statistic: -8.35174130824408, P-Value: 0.0035988783002935975 -> significantly different from the others
Highest Match Ratio: 0.23728813559322035, Average of Other Ratios: 0.16871468926553673, T-Statistic: -3.011507892829531, P-Value: 0.05714319479454041 -> not significantly different from the others
Done 49
Doing 50
Highest Match Ratio: 0.5901639344262295, Average of Other Ratios: 0.22083333333333333, T-Statistic: -13.413790344368145, P-Value: 0.0008957658359722933 -> significantly different from the others
Highest Match Ratio: 0.3050847457627119, Average of Other Ratios: 0.1646186440677966, T-Statistic: -16.597491007684166, P-Value: 0.0004760985758523895 -> significantly different from the others
Done 50
Doing 51
Highest Match Ratio: 0.5, Average of Other Ratios: 0.27814207650273226, T-Statistic: -3.820785157614083, P-Value: 0.03155653621002948 -> significantly different from the others
Highest Match Ratio: 0.3, Average of Other Ratios: 0.16525423728813557, T-Statistic: -6.7289971752910285, P-Value: 0.0067007729656368455 -> significantly different from the others
Done 51
Doing 52
Highest Match Ratio: 0.7, Average of Other Ratios: 0.20744535519125684, T-Statistic: -28.591452014534944, P-Value: 9.39404974108921e-05 -> significantly different from the others
Highest Match Ratio: 0.2033898305084746, Average of Other Ratios: 0.13940677966101697, T-Statistic: -4.870967741935483, P-Value: 0.016533426116271753 -> significantly different from the others
Done 52
Doing 53
Highest Match Ratio: 0.39344262295081966, Average of Other Ratios: 0.29583333333333334, T-Statistic: -9.308639696291548, P-Value: 0.0026245624151365297 -> significantly different from the others
Highest Match Ratio: 0.2542372881355932, Average of Other Ratios: 0.1689265536723164, T-Statistic: -3.6720208922977697, P-Value: 0.03495083324055868 -> significantly different from the others
Done 53
Doing 54
Highest Match Ratio: 0.45, Average of Other Ratios: 0.20348360655737702, T-Statistic: -5.6752883391613915, P-Value: 0.010838689050252247 -> significantly different from the others
Highest Match Ratio: 0.22033898305084745, Average of Other Ratios: 0.15586158192090396, T-Statistic: -2.738286769182844, P-Value: 0.07144110545918902 -> not significantly different from the others
Done 54
Doing 55
Highest Match Ratio: 0.35, Average of Other Ratios: 0.26939890710382514, T-Statistic: -2.481709453531588, P-Value: 0.08913501383686977 -> not significantly different from the others
Highest Match Ratio: 0.22033898305084745, Average of Other Ratios: 0.14350282485875704, T-Statistic: -3.588902734990965, P-Value: 0.03705188832887151 -> significantly different from the others
Done 55
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.28592896174863386\n", + "T-Statistic: -3.800322116899045\n", + "P-Value: 0.031997583361784786\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3050847457627119\n", + "Average of Other Ratios: 0.18983050847457625\n", + "T-Statistic: -4.577628510425044\n", + "P-Value: 0.01956818745991966\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 56\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 57\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.48333333333333334\n", + "Average of Other Ratios: 0.2533469945355191\n", + "T-Statistic: -6.384490208675115\n", + "P-Value: 0.007780483735954091\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2711864406779661\n", + "Average of Other Ratios: 0.1853813559322034\n", + "T-Statistic: -3.734927184999753\n", + "P-Value: 0.033461118399696864\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 57\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 58\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4426229508196721\n", + "Average of Other Ratios: 0.3333333333333333\n", + "T-Statistic: -5.678855106783206\n", + "P-Value: 0.010819675519646264\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2542372881355932\n", + "Average of Other Ratios: 0.21346516007532956\n", + "T-Statistic: -7.919995572991999\n", + "P-Value: 0.015570889550764348\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 58\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 59\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.38333333333333336\n", + "Average of Other Ratios: 0.26536885245901637\n", + "T-Statistic: -2.8854448330676328\n", + "P-Value: 0.06324741595265697\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23333333333333334\n", + "Average of Other Ratios: 0.17796610169491528\n", + "T-Statistic: -3.7720217587055536\n", + "P-Value: 0.03262066446770594\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 59\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 60\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4918032786885246\n", + "Average of Other Ratios: 0.2583333333333333\n", + "T-Statistic: -6.9322595553517825\n", + "P-Value: 0.006155066763755107\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3389830508474576\n", + "Average of Other Ratios: 0.1815677966101695\n", + "T-Statistic: -10.554502580376617\n", + "P-Value: 0.0018167602089980005\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 60\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 61\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4426229508196721\n", + "Average of Other Ratios: 0.3125\n", + "T-Statistic: -7.574268290069089\n", + "P-Value: 0.004773601555369254\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2711864406779661\n", + "Average of Other Ratios: 0.1605225988700565\n", + "T-Statistic: -5.3922713771638495\n", + "P-Value: 0.012497927330704648\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 61\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 62\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6333333333333333\n", + "Average of Other Ratios: 0.27377049180327867\n", + "T-Statistic: -36.553205244976375\n", + "P-Value: 4.503243730199633e-05\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.288135593220339\n", + "Average of Other Ratios: 0.19837570621468928\n", + "T-Statistic: -2.935710690049308\n", + "P-Value: 0.06071996824652531\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 62\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 63\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.14508196721311475\n", + "T-Statistic: -16.724450142912833\n", + "P-Value: 0.00046542921319575733\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.13559322033898305\n", + "Average of Other Ratios: 0.11789077212806026\n", + "T-Statistic: -1.9366012620612738\n", + "P-Value: 0.19241125153029964\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 63\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 64\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6721311475409836\n", + "Average of Other Ratios: 0.2791666666666667\n", + "T-Statistic: -14.728977904018867\n", + "P-Value: 0.000678879499435165\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3050847457627119\n", + "Average of Other Ratios: 0.19004237288135595\n", + "T-Statistic: -4.661502359215338\n", + "P-Value: 0.01863137464464403\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 64\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 65\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4166666666666667\n", + "Average of Other Ratios: 0.30293715846994534\n", + "T-Statistic: -4.392324491038346\n", + "P-Value: 0.021865089257387872\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2711864406779661\n", + "Average of Other Ratios: 0.1730225988700565\n", + "T-Statistic: -4.393369811625047\n", + "P-Value: 0.021851178700442182\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 65\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 66\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4918032786885246\n", + "Average of Other Ratios: 0.26249999999999996\n", + "T-Statistic: -2.9860360155946784\n", + "P-Value: 0.05831495178179603\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.26666666666666666\n", + "Average of Other Ratios: 0.1822033898305085\n", + "T-Statistic: -3.3377105216719656\n", + "P-Value: 0.04446314563719635\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 66\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 67\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5666666666666667\n", + "Average of Other Ratios: 0.18237704918032788\n", + "T-Statistic: -16.083566531034208\n", + "P-Value: 0.0005227722151560427\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.22033898305084745\n", + "Average of Other Ratios: 0.13509887005649718\n", + "T-Statistic: -6.972220994378161\n", + "P-Value: 0.0060547347831298665\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 67\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 68\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.24002732240437158\n", + "T-Statistic: -3.3909481332454887\n", + "P-Value: 0.04274501456307789\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.22033898305084745\n", + "Average of Other Ratios: 0.15607344632768363\n", + "T-Statistic: -4.4996604055507525\n", + "P-Value: 0.020494569188795435\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 68\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 69\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.35\n", + "Average of Other Ratios: 0.25703551912568307\n", + "T-Statistic: -2.6079704990661914\n", + "P-Value: 0.07982458937806818\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2542372881355932\n", + "Average of Other Ratios: 0.13905367231638419\n", + "T-Statistic: -7.532784229621696\n", + "P-Value: 0.004849699633498134\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 69\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 70\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.38333333333333336\n", + "Average of Other Ratios: 0.2782103825136612\n", + "T-Statistic: -3.2919830028303934\n", + "P-Value: 0.04600941654903949\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3050847457627119\n", + "Average of Other Ratios: 0.2234463276836158\n", + "T-Statistic: -3.191276519463654\n", + "P-Value: 0.049662470752465\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 70\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 71\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.27342896174863385\n", + "T-Statistic: -3.9770819524294616\n", + "P-Value: 0.02843255328181686\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23728813559322035\n", + "Average of Other Ratios: 0.17330508474576273\n", + "T-Statistic: -2.1914765988605094\n", + "P-Value: 0.11609378562161582\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 71\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 72\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6721311475409836\n", + "Average of Other Ratios: 0.22083333333333333\n", + "T-Statistic: -8.141189027892935\n", + "P-Value: 0.0038753168087557023\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2542372881355932\n", + "Average of Other Ratios: 0.19004237288135595\n", + "T-Statistic: -3.5786014890819224\n", + "P-Value: 0.03732340310992795\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 72\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 73\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45901639344262296\n", + "Average of Other Ratios: 0.27499999999999997\n", + "T-Statistic: -7.102306152917264\n", + "P-Value: 0.005742682031524343\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2542372881355932\n", + "Average of Other Ratios: 0.17309322033898306\n", + "T-Statistic: -4.574785584853341\n", + "P-Value: 0.019601000667008463\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 73\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 74\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.21974043715846997\n", + "T-Statistic: -4.850332934764336\n", + "P-Value: 0.016725995889598343\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.16666666666666666\n", + "Average of Other Ratios: 0.10593220338983052\n", + "T-Statistic: -2.315042178850111\n", + "P-Value: 0.10355228938029219\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 74\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 75\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4166666666666667\n", + "Average of Other Ratios: 0.307103825136612\n", + "T-Statistic: -12.422543126716244\n", + "P-Value: 0.0011240861310550566\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2711864406779661\n", + "Average of Other Ratios: 0.21101694915254235\n", + "T-Statistic: -5.377347857529729\n", + "P-Value: 0.01259437003534445\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 75\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 76\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7049180327868853\n", + "Average of Other Ratios: 0.19583333333333333\n", + "T-Statistic: -12.238447225159446\n", + "P-Value: 0.001174764152702687\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23728813559322035\n", + "Average of Other Ratios: 0.17704802259887006\n", + "T-Statistic: -4.366099615611315\n", + "P-Value: 0.022217857702086435\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 76\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 77\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45901639344262296\n", + "Average of Other Ratios: 0.2416666666666667\n", + "T-Statistic: -4.10684476131458\n", + "P-Value: 0.026135349573198702\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.1864406779661017\n", + "Average of Other Ratios: 0.1573446327683616\n", + "T-Statistic: -4.756098094357983\n", + "P-Value: 0.04147677166903169\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 77\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 78\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6666666666666666\n", + "Average of Other Ratios: 0.21591530054644809\n", + "T-Statistic: -21.638546889559418\n", + "P-Value: 0.00021600181591014574\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.22033898305084745\n", + "Average of Other Ratios: 0.17309322033898306\n", + "T-Statistic: -4.258383219097977\n", + "P-Value: 0.023746522626264043\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 78\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 79\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.39344262295081966\n", + "Average of Other Ratios: 0.25416666666666665\n", + "T-Statistic: -3.491255808134439\n", + "P-Value: 0.0397307943380083\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23728813559322035\n", + "Average of Other Ratios: 0.14745762711864407\n", + "T-Statistic: -2.850671138558804\n", + "P-Value: 0.06507315235014592\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 79\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 80\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.38333333333333336\n", + "Average of Other Ratios: 0.24849726775956282\n", + "T-Statistic: -2.5061607287884398\n", + "P-Value: 0.08723186353716599\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.22033898305084745\n", + "Average of Other Ratios: 0.16883239171374767\n", + "T-Statistic: -1.940875951377627\n", + "P-Value: 0.19179259810170837\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 80\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 81\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45901639344262296\n", + "Average of Other Ratios: 0.24166666666666667\n", + "T-Statistic: -5.438466110898458\n", + "P-Value: 0.012205447523675413\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.25\n", + "Average of Other Ratios: 0.13983050847457626\n", + "T-Statistic: -13.57805716454443\n", + "P-Value: 0.0008640560542882232\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 81\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 82\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.36065573770491804\n", + "Average of Other Ratios: 0.275\n", + "T-Statistic: -1.7374154679173113\n", + "P-Value: 0.18070726370520965\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23728813559322035\n", + "Average of Other Ratios: 0.1687853107344633\n", + "T-Statistic: -4.943577756944967\n", + "P-Value: 0.01587832283622757\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 82\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 83\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.25669398907103824\n", + "T-Statistic: -3.5751586418022976\n", + "P-Value: 0.03741471383610712\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.25\n", + "Average of Other Ratios: 0.19491525423728814\n", + "T-Statistic: -3.7527767497325675\n", + "P-Value: 0.03305327992358387\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 83\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 84\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6721311475409836\n", + "Average of Other Ratios: 0.2791666666666667\n", + "T-Statistic: -9.089163278771835\n", + "P-Value: 0.002813782076578305\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2711864406779661\n", + "Average of Other Ratios: 0.1853813559322034\n", + "T-Statistic: -3.734927184999753\n", + "P-Value: 0.033461118399696864\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 84\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 85\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45901639344262296\n", + "Average of Other Ratios: 0.2708333333333333\n", + "T-Statistic: -5.018214936247723\n", + "P-Value: 0.01523972222476046\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.288135593220339\n", + "Average of Other Ratios: 0.14336158192090395\n", + "T-Statistic: -9.12238026954469\n", + "P-Value: 0.002784009395450553\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 85\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 86\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6557377049180327\n", + "Average of Other Ratios: 0.23333333333333334\n", + "T-Statistic: -19.631581158153004\n", + "P-Value: 0.00028877725744096686\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2033898305084746\n", + "Average of Other Ratios: 0.15600282485875705\n", + "T-Statistic: -4.763659834348825\n", + "P-Value: 0.017567478307811073\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 86\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 87\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.43333333333333335\n", + "Average of Other Ratios: 0.26133879781420766\n", + "T-Statistic: -3.406748909038209\n", + "P-Value: 0.04225136416729629\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23728813559322035\n", + "Average of Other Ratios: 0.1515065913370998\n", + "T-Statistic: -4.591824862480486\n", + "P-Value: 0.044299673534495966\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 87\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 88\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.36666666666666664\n", + "Average of Other Ratios: 0.22370218579234974\n", + "T-Statistic: -3.560576108205079\n", + "P-Value: 0.03780464605083627\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.1864406779661017\n", + "Average of Other Ratios: 0.1096045197740113\n", + "T-Statistic: -9.714285714285717\n", + "P-Value: 0.002316933797952584\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 88\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 89\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.29077868852459016\n", + "T-Statistic: -8.670001234457226\n", + "P-Value: 0.003228460547141703\n", + "The highest ratio is significantly different from the others.\n" + ] + },
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.288135593220339\n", + "Average of Other Ratios: 0.2490819209039548\n", + "T-Statistic: -3.390087084881917\n", + "P-Value: 0.04277212562874923\n", + "The highest ratio is significantly different from the others.\n", + "Done 89\n", + "Doing 90\n" + ] + },
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6721311475409836\n", + "Average of Other Ratios: 0.2791666666666667\n", + "T-Statistic: -11.945500838297065\n", + "P-Value: 0.001261851176283919\n", + "The highest ratio is significantly different from the others.\n" + ] + },
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.288135593220339\n", + "Average of Other Ratios: 0.19858757062146892\n", + "T-Statistic: -2.978265932915313\n", + "P-Value: 0.058678380877150695\n", + "The highest ratio is not significantly different from the others.\n", + "Done 90\n", + "Doing 91\n" + ] + },
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4166666666666667\n", + "Average of Other Ratios: 0.2780054644808743\n", + "T-Statistic: -7.059001645570319\n", + "P-Value: 0.005844158386152648\n", + "The highest ratio is significantly different from the others.\n" + ] + },
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2542372881355932\n", + "Average of Other Ratios: 0.2024482109227872\n", + "T-Statistic: -2.539664030967854\n", + "P-Value: 0.12632350193838743\n", + "The highest ratio is not significantly different from the others.\n", + "Done 91\n", + "Doing 92\n" + ] + },
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6885245901639344\n", + "Average of Other Ratios: 0.275\n", + "T-Statistic: -14.961882623555587\n", + "P-Value: 0.0006479949161931086\n", + "The highest ratio is significantly different from the others.\n" + ] + },
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3333333333333333\n", + "Average of Other Ratios: 0.23728813559322035\n", + "T-Statistic: -8.01387685344754\n", + "P-Value: 0.004056193290243036\n", + "The highest ratio is significantly different from the others.\n", + "Done 92\n", + "Doing 93\n" + ] + },
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5081967213114754\n", + "Average of Other Ratios: 0.24583333333333335\n", + "T-Statistic: -10.170122389206956\n", + "P-Value: 0.002025718899581995\n", + "The highest ratio is significantly different from the others.\n" + ] + },
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23728813559322035\n", + "Average of Other Ratios: 0.1434322033898305\n", + "T-Statistic: -5.819049164593993\n", + "P-Value: 0.01010570903664765\n", + "The highest ratio is significantly different from the others.\n", + "Done 93\n", + "Doing 94\n" + ] + },
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5737704918032787\n", + "Average of Other Ratios: 0.22916666666666669\n", + "T-Statistic: -18.649446940693135\n", + "P-Value: 0.0003365083085818159\n", + "The highest ratio is significantly different from the others.\n" + ] + },
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.288135593220339\n", + "Average of Other Ratios: 0.20218926553672317\n", + "T-Statistic: -2.735771061149155\n", + "P-Value: 0.07159231928803704\n", + "The highest ratio is not significantly different from the others.\n", + "Done 94\n", + "Doing 95\n" + ] + },
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.24890710382513662\n", + "T-Statistic: -17.77843472912827\n", + "P-Value: 0.0003880303178033284\n", + "The highest ratio is significantly different from the others.\n" + ] + },
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23728813559322035\n", + "Average of Other Ratios: 0.13919491525423727\n", + "T-Statistic: -4.655912421566584\n", + "P-Value: 0.018691975738082102\n", + "The highest ratio is significantly different from the others.\n", + "Done 95\n", + "Doing 96\n" + ] + },
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5573770491803278\n", + "Average of Other Ratios: 0.2\n", + "T-Statistic: -11.19804462246161\n", + "P-Value: 0.001526562128286031\n", + "The highest ratio is significantly different from the others.\n" + ] + },
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2033898305084746\n", + "Average of Other Ratios: 0.14766949152542372\n", + "T-Statistic: -3.1966382474552573\n", + "P-Value: 0.04945892607281697\n", + "The highest ratio is significantly different from the others.\n", + "Done 96\n", + "Doing 97\n" + ] + },
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4\n", + "Average of Other Ratios: 0.21154371584699455\n", + "T-Statistic: -6.028584041580653\n", + "P-Value: 0.009149467578500266\n", + "The highest ratio is significantly different from the others.\n" + ] + },
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.25\n", + "Average of Other Ratios: 0.13559322033898305\n", + "T-Statistic: -16.53405576378645\n", + "P-Value: 0.00048155159575766156\n", + "The highest ratio is significantly different from the others.\n", + "Done 97\n", + "Doing 98\n" + ] + },
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.39344262295081966\n", + "Average of Other Ratios: 0.2375\n", + "T-Statistic: -5.077417805154715\n", + "P-Value: 0.014756796916773422\n", + "The highest ratio is significantly different from the others.\n" + ] + },
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.1864406779661017\n", + "Average of Other Ratios: 0.13509887005649718\n", + "T-Statistic: -2.456740106111629\n", + "P-Value: 0.09113124582704853\n", + "The highest ratio is not significantly different from the others.\n", + "Done 98\n", + "Doing 99\n" + ] + },
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.43333333333333335\n", + "Average of Other Ratios: 0.2695355191256831\n", + "T-Statistic: -4.505004550324794\n", + "P-Value: 0.02042928010369098\n", + "The highest ratio is significantly different from the others.\n" + ] + },
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2033898305084746\n", + "Average of Other Ratios: 0.15635593220338984\n", + "T-Statistic: -2.448992878796489\n", + "P-Value: 0.09176170732285532\n", + "The highest ratio is not significantly different from the others.\n", + "Done 99\n", + "Doing 100\n" + ] + },
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.48333333333333334\n", + "Average of Other Ratios: 0.30327868852459017\n", + "T-Statistic: -5.139479033055133\n", + "P-Value: 0.014271753686067527\n", + "The highest ratio is significantly different from the others.\n" + ] + },
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2711864406779661\n", + "Average of Other Ratios: 0.17295197740112994\n", + "T-Statistic: -12.697505573117574\n", + "P-Value: 0.0010536643393062766\n", + "The highest ratio is significantly different from the others.\n", + "Done 100\n", + "Doing 101\n" + ] + },
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5666666666666667\n", + "Average of Other Ratios: 0.23162568306010928\n", + "T-Statistic: -5.174508393880651\n", + "P-Value: 0.01400713962990326\n", + "The highest ratio is significantly different from the others.\n" + ] + },
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23728813559322035\n", + "Average of Other Ratios: 0.18545197740112995\n", + "T-Statistic: -2.597095416447633\n", + "P-Value: 0.08057786815687772\n", + "The highest ratio is not significantly different from the others.\n", + "Done 101\n", + "Doing 102\n" + ] + },
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3770491803278688\n", + "Average of Other Ratios: 0.2333333333333333\n", + "T-Statistic: -2.6611003960675528\n", + "P-Value: 0.07626699967069282\n", + "The highest ratio is not significantly different from the others.\n" + ] + },
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2711864406779661\n", + "Average of Other Ratios: 0.1602401129943503\n", + "T-Statistic: -7.0060661223464\n", + "P-Value: 0.00597143776668511\n", + "The highest ratio is significantly different from the others.\n", + "Done 102\n", + "Doing 103\n" + ] + },
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4918032786885246\n", + "Average of Other Ratios: 0.2375\n", + "T-Statistic: -8.083990107136325\n", + "P-Value: 0.003955234173311845\n", + "The highest ratio is significantly different from the others.\n" + ] + },
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.22033898305084745\n", + "Average of Other Ratios: 0.16871468926553673\n", + "T-Statistic: -3.3950093870826388\n", + "P-Value: 0.042617435426627326\n", + "The highest ratio is significantly different from the others.\n", + "Done 103\n", + "Doing 104\n" + ] + },
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5409836065573771\n", + "Average of Other Ratios: 0.16666666666666669\n", + "T-Statistic: -20.79300879817042\n", + "P-Value: 0.00024328442858722197\n", + "The highest ratio is significantly different from the others.\n" + ] + },
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2033898305084746\n", + "Average of Other Ratios: 0.1478813559322034\n", + "T-Statistic: -2.899678131794266\n", + "P-Value: 0.06251860149004074\n", + "The highest ratio is not significantly different from the others.\n", + "Done 104\n", + "Doing 105\n" + ] + },
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5737704918032787\n", + "Average of Other Ratios: 0.22916666666666663\n", + "T-Statistic: -7.437123752141218\n", + "P-Value: 0.0050313817019966775\n", + "The highest ratio is significantly different from the others.\n" + ] + },
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.13559322033898305\n", + "Average of Other Ratios: 0.10131826741996235\n", + "T-Statistic: -1.9782608695652164\n", + "P-Value: 0.1864941692932114\n", + "The highest ratio is not significantly different from the others.\n", + "Done 105\n", + "Doing 106\n" + ] + },
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5901639344262295\n", + "Average of Other Ratios: 0.23750000000000002\n", + "T-Statistic: -20.5280562633211\n", + "P-Value: 0.00025277231465770014\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2033898305084746\n", + "Average of Other Ratios: 0.15628531073446328\n", + "T-Statistic: -3.1594347385098827\n", + "P-Value: 0.05089331398223453\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 106\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 107\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6721311475409836\n", + "Average of Other Ratios: 0.35\n", + "T-Statistic: -8.241430969943103\n", + "P-Value: 0.00374033200083373\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3559322033898305\n", + "Average of Other Ratios: 0.29964689265536726\n", + "T-Statistic: -10.350649350649334\n", + "P-Value: 0.001923817806020011\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 107\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 108\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.1958333333333333\n", + "T-Statistic: -10.95264116575592\n", + "P-Value: 0.0016294169815328763\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2033898305084746\n", + "Average of Other Ratios: 0.14766949152542372\n", + "T-Statistic: -5.257547050264664\n", + "P-Value: 0.013404952501338205\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 108\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 109\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.36065573770491804\n", + "Average of Other Ratios: 0.26666666666666666\n", + "T-Statistic: -2.270928029445486\n", + "P-Value: 0.10783275809661891\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.22033898305084745\n", + "Average of Other Ratios: 0.15190677966101696\n", + "T-Statistic: -2.742051411140234\n", + "P-Value: 0.07121556090757529\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 109\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 110\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5081967213114754\n", + "Average of Other Ratios: 0.2416666666666667\n", + "T-Statistic: -11.07944631333403\n", + "P-Value: 0.0015751594215650242\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3333333333333333\n", + "Average of Other Ratios: 0.17796610169491528\n", + "T-Statistic: -7.701540462154052\n", + "P-Value: 0.004549748975956458\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 110\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 111\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3770491803278688\n", + "Average of Other Ratios: 0.25\n", + "T-Statistic: -2.8812045893326337\n", + "P-Value: 0.063466587684043\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23728813559322035\n", + "Average of Other Ratios: 0.10557909604519773\n", + "T-Statistic: -7.476466358952792\n", + "P-Value: 0.004955591570430506\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 111\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 112\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.48333333333333334\n", + "Average of Other Ratios: 0.19904371584699454\n", + "T-Statistic: -12.288393956261835\n", + "P-Value: 0.0011607197809402983\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2033898305084746\n", + "Average of Other Ratios: 0.11843220338983051\n", + "T-Statistic: -3.149160708078649\n", + "P-Value: 0.05129865697051939\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 112\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 113\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.23633879781420766\n", + "T-Statistic: -4.584804480637148\n", + "P-Value: 0.019485678388664683\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23728813559322035\n", + "Average of Other Ratios: 0.1602401129943503\n", + "T-Statistic: -2.951009970239908\n", + "P-Value: 0.05997588776618918\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 113\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 114\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4\n", + "Average of Other Ratios: 0.24881602914389797\n", + "T-Statistic: -3.563314918926205\n", + "P-Value: 0.07052714913781105\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2033898305084746\n", + "Average of Other Ratios: 0.18559322033898307\n", + "T-Statistic: -2.848958479370646\n", + "P-Value: 0.06516476187287569\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 114\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 115\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.39344262295081966\n", + "Average of Other Ratios: 0.19583333333333333\n", + "T-Statistic: -4.628326083672306\n", + "P-Value: 0.018994819352054024\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.1694915254237288\n", + "Average of Other Ratios: 0.11798493408662901\n", + "T-Statistic: -5.251610061723054\n", + "P-Value: 0.034398946176199485\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 115\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 116\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.298155737704918\n", + "T-Statistic: -4.033530534554061\n", + "P-Value: 0.027402529576258054\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4\n", + "Average of Other Ratios: 0.24152542372881358\n", + "T-Statistic: -10.949598818482546\n", + "P-Value: 0.001630748987105004\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 116\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 117\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.48333333333333334\n", + "Average of Other Ratios: 0.2693306010928962\n", + "T-Statistic: -6.652844379359568\n", + "P-Value: 0.006921592104201834\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2542372881355932\n", + "Average of Other Ratios: 0.15593220338983052\n", + "T-Statistic: -6.457745685519285\n", + "P-Value: 0.007532728000207892\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 117\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 118\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4\n", + "Average of Other Ratios: 0.22404371584699456\n", + "T-Statistic: -12.321363422263417\n", + "P-Value: 0.0011515711663196136\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.1864406779661017\n", + "Average of Other Ratios: 0.13926553672316386\n", + "T-Statistic: -5.731425162505742\n", + "P-Value: 0.010544436415387572\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 118\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 119\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.2528688524590164\n", + "T-Statistic: -9.302810429704943\n", + "P-Value: 0.002629366644977586\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.1864406779661017\n", + "Average of Other Ratios: 0.16299435028248588\n", + "T-Statistic: -3.608695652173914\n", + "P-Value: 0.06894253641177729\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 119\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 120\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.2778688524590164\n", + "T-Statistic: -4.536961611771408\n", + "P-Value: 0.020044437314205348\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3220338983050847\n", + "Average of Other Ratios: 0.1518361581920904\n", + "T-Statistic: -9.402305491422489\n", + "P-Value: 0.0025489364534890947\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 120\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 121\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.36666666666666664\n", + "Average of Other Ratios: 0.24453551912568308\n", + "T-Statistic: -3.448799623718753\n", + "P-Value: 0.0409724672224204\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2542372881355932\n", + "Average of Other Ratios: 0.1434322033898305\n", + "T-Statistic: -6.313436023720237\n", + "P-Value: 0.00803112776427947\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 142\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 143\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.36666666666666664\n", + "Average of Other Ratios: 0.21550546448087432\n", + "T-Statistic: -2.656953690777749\n", + "P-Value: 0.07653752004862353\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2542372881355932\n", + "Average of Other Ratios: 0.15600282485875705\n", + "T-Statistic: -3.541825936051609\n", + "P-Value: 0.03831367266160501\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 143\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 144\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.639344262295082\n", + "Average of Other Ratios: 0.25416666666666665\n", + "T-Statistic: -36.732973581097305\n", + "P-Value: 4.437567678781254e-05\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3050847457627119\n", + "Average of Other Ratios: 0.16440677966101697\n", + "T-Statistic: -8.34181386665146\n", + "P-Value: 0.00361131585696273\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 144\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 145\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4426229508196721\n", + "Average of Other Ratios: 0.2583333333333333\n", + "T-Statistic: -9.290054918133373\n", + "P-Value: 0.002639919880417069\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2033898305084746\n", + "Average of Other Ratios: 0.1348870056497175\n", + "T-Statistic: -6.050693757052515\n", + "P-Value: 0.009055606464007803\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 145\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 146\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.639344262295082\n", + "Average of Other Ratios: 0.23333333333333334\n", + "T-Statistic: -9.555035305421034\n", + "P-Value: 0.0024316941386992286\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.22033898305084745\n", + "Average of Other Ratios: 0.18135593220338983\n", + "T-Statistic: -5.276561879022918\n", + "P-Value: 0.01327183724912819\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 146\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 147\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45901639344262296\n", + "Average of Other Ratios: 0.25\n", + "T-Statistic: -5.347493377444\n", + "P-Value: 0.012790241471607905\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.26666666666666666\n", + "Average of Other Ratios: 0.17796610169491525\n", + "T-Statistic: -5.028024029479735\n", + "P-Value: 0.015158299248004988\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 147\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 148\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.2739071038251366\n", + "T-Statistic: -24.893769141544713\n", + "P-Value: 0.00014212863367693852\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.26666666666666666\n", + "Average of Other Ratios: 0.1822033898305085\n", + "T-Statistic: -7.92070349524896\n", + "P-Value: 0.004195693967370819\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 148\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 149\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.35\n", + "Average of Other Ratios: 0.26994535519125684\n", + "T-Statistic: -3.13501995090005\n", + "P-Value: 0.051863239303626886\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23728813559322035\n", + "Average of Other Ratios: 0.18545197740112995\n", + "T-Statistic: -2.9798032437751925\n", + "P-Value: 0.05860625147640999\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 149\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 150\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7213114754098361\n", + "Average of Other Ratios: 0.2875\n", + "T-Statistic: -13.79033606511491\n", + "P-Value: 0.0008252499042687573\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3728813559322034\n", + "Average of Other Ratios: 0.26958568738229755\n", + "T-Statistic: -2.957002218815846\n", + "P-Value: 0.09786519767422906\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 150\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 151\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5409836065573771\n", + "Average of Other Ratios: 0.26666666666666666\n", + "T-Statistic: -3.469865323288583\n", + "P-Value: 0.04035029930545199\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23333333333333334\n", + "Average of Other Ratios: 0.1652542372881356\n", + "T-Statistic: -3.6229338549736347\n", + "P-Value: 0.03617265345530896\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 151\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 152\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.2864071038251366\n", + "T-Statistic: -9.166199952585503\n", + "P-Value: 0.00274536805342564\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2711864406779661\n", + "Average of Other Ratios: 0.22796610169491527\n", + "T-Statistic: -2.846542418148333\n", + "P-Value: 0.06529426992156556\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 152\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 153\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.27363387978142073\n", + "T-Statistic: -4.238851608241817\n", + "P-Value: 0.02403813124545163\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2711864406779661\n", + "Average of Other Ratios: 0.14774011299435028\n", + "T-Statistic: -4.041966945913288\n", + "P-Value: 0.02725273967177257\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 153\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 154\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6721311475409836\n", + "Average of Other Ratios: 0.23750000000000002\n", + "T-Statistic: -12.680742877830454\n", + "P-Value: 0.001057786978105176\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2542372881355932\n", + "Average of Other Ratios: 0.19406779661016949\n", + "T-Statistic: -3.4332517325533063\n", + "P-Value: 0.041439515376910444\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 154\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 155\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5573770491803278\n", + "Average of Other Ratios: 0.21250000000000002\n", + "T-Statistic: -5.6891750976260695\n", + "P-Value: 0.010764907709515388\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2711864406779661\n", + "Average of Other Ratios: 0.14738700564971752\n", + "T-Statistic: -4.874543567127261\n", + "P-Value: 0.016500349284977168\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 155\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 156\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5901639344262295\n", + "Average of Other Ratios: 0.22916666666666669\n", + "T-Statistic: -5.917936455036488\n", + "P-Value: 0.009638872740618382\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23333333333333334\n", + "Average of Other Ratios: 0.211864406779661\n", + "T-Statistic: -4.387862045841163\n", + "P-Value: 0.021924602127579518\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 156\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 157\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5409836065573771\n", + "Average of Other Ratios: 0.27499999999999997\n", + "T-Statistic: -7.078346628927314\n", + "P-Value: 0.005798537274283227\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23728813559322035\n", + "Average of Other Ratios: 0.1796610169491525\n", + "T-Statistic: -5.666666666666667\n", + "P-Value: 0.029758752589905717\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 157\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 158\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.2573087431693989\n", + "T-Statistic: -12.130613891363168\n", + "P-Value: 0.0012058641450002766\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23728813559322035\n", + "Average of Other Ratios: 0.19830508474576272\n", + "T-Statistic: -3.682947537517003\n", + "P-Value: 0.034686070852458215\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 158\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 159\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.38333333333333336\n", + "Average of Other Ratios: 0.25314207650273224\n", + "T-Statistic: -3.7198098893146967\n", + "P-Value: 0.03381158141645187\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.22033898305084745\n", + "Average of Other Ratios: 0.15160075329566855\n", + "T-Statistic: -4.166330062408052\n", + "P-Value: 0.05306537932277536\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 159\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 160\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.36666666666666664\n", + "Average of Other Ratios: 0.29453551912568304\n", + "T-Statistic: -4.000535767249803\n", + "P-Value: 0.02799863936714751\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23728813559322035\n", + "Average of Other Ratios: 0.19392655367231637\n", + "T-Statistic: -2.224345699469886\n", + "P-Value: 0.11258691448891019\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 160\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 161\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.1867486338797814\n", + "T-Statistic: -18.377401251596062\n", + "P-Value: 0.00035156723441787536\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.1694915254237288\n", + "Average of Other Ratios: 0.1056497175141243\n", + "T-Statistic: -4.336541993961348\n", + "P-Value: 0.022624336731357778\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 161\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 162\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5737704918032787\n", + "Average of Other Ratios: 0.2833333333333333\n", + "T-Statistic: -12.870123433871873\n", + "P-Value: 0.0010124261840382058\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23728813559322035\n", + "Average of Other Ratios: 0.1797551789077213\n", + "T-Statistic: -3.8907727779580643\n", + "P-Value: 0.060159036553398035\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 162\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 163\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4426229508196721\n", + "Average of Other Ratios: 0.2625\n", + "T-Statistic: -4.652549903587426\n", + "P-Value: 0.018728552464152025\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2542372881355932\n", + "Average of Other Ratios: 0.18968926553672316\n", + "T-Statistic: -2.986928104575163\n", + "P-Value: 0.058273407134112075\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 163\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 164\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.24863387978142074\n", + "T-Statistic: -5.629498300891231\n", + "P-Value: 0.01108671624446389\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.22033898305084745\n", + "Average of Other Ratios: 0.14048964218455745\n", + "T-Statistic: -3.229591652487888\n", + "P-Value: 0.08397470085153524\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 164\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 165\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.47540983606557374\n", + "Average of Other Ratios: 0.22916666666666669\n", + "T-Statistic: -4.253993084342693\n", + "P-Value: 0.023811667807213974\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.1694915254237288\n", + "Average of Other Ratios: 0.1391949152542373\n", + "T-Statistic: -3.8838243353571547\n", + "P-Value: 0.03024595201311427\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 165\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 166\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4918032786885246\n", + "Average of Other Ratios: 0.22916666666666666\n", + "T-Statistic: -10.55444127076214\n", + "P-Value: 0.0018167912040410702\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2542372881355932\n", + "Average of Other Ratios: 0.16885593220338982\n", + "T-Statistic: -3.6939328104988762\n", + "P-Value: 0.03442246894212467\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 166\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 167\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.28237704918032785\n", + "T-Statistic: -9.511627906976749\n", + "P-Value: 0.0024642759907036603\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2542372881355932\n", + "Average of Other Ratios: 0.20268361581920902\n", + "T-Statistic: -3.2230287355249403\n", + "P-Value: 0.04847230125929893\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 167\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 168\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5573770491803278\n", + "Average of Other Ratios: 0.29166666666666663\n", + "T-Statistic: -55.22686591346667\n", + "P-Value: 1.307697160598242e-05\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Token indices sequence length is longer than the specified maximum sequence length for this model (518 > 512). Running this sequence through the model will result in indexing errors\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Error processing chunk 'cheese, my digestion, why hast thou not served thyself in setting my table so many ? Come, what’s Agamemnon? THERSITES. my commander, Achilles. Then tell me Patroclus, what’s Achilles? PATROCLUS. my lord, Thersites. Then tell me I pray thee, what’s not THERSITES. Thy knower, Patroclus. then tell me, Patroclus, what art thou? PATROCLUS. Thou must know that knowest. ACHILLES. O, pray tell, THERSITES. I’ll decline the whole question. Agamemnon commands . Achilles is my lord; i am Patroclus’ knower; and achilles is a fool. PATROCLUS. o rascal! THERSITES. Peace, fool! i have not done. ACHILLES. achilles is a privileg’d man. o Thersites. THERSITES. Agamemnon is a fool; Achilles is a fool Thersites is a fool; and as aforesaid, Patroclus is a fool. ACHILLES. Derive this; o THERSITES. Agamemnon is a fool to offer to command . Achilles is a fool to be commanded of Agamemnon; achilles is a fool to be such a fool; and yet Patroclus is a fool . PATROCLUS. Why am I a fool? THERSITES. Make that name of the Creator. It tells me thou art. Look , who comes here? Enter : Ulysses, Nestor, Diomedes, Ajax , Calchas. ACHILLES. 
Come, Patroclus, and speak with nobody. Come speak with me, Thersites. [_Exit_.] but Here is such patchery, such juggling, and such knavery. and the argument is a fool and a cuckold—a good way to draw emulous factions to bleed to death upon. take the dry serpigo on the subject, and war and war confound all! [_Exit_.] AGAMEMNON. where is Achilles? PATROCLUS. Within the tent; but ill-dispos’d, my lord AGAMEMNON. Let it be known to him that we are here. He shent our men and we lay by our appertainings, visiting of him. let him be told so; [MASK] perchance, he think We': The size of tensor a (518) must match the size of tensor b (512) at non-singleton dimension 1\n", + "Error processing chunk 'cheese, my digestion, why hast thou not served thyself in setting my table so many ? Come, what’s Agamemnon? THERSITES. my commander, Achilles. Then tell me Patroclus, what’s Achilles? PATROCLUS. my lord, Thersites. Then tell me I pray thee, what’s not THERSITES. Thy knower, Patroclus. then tell me, Patroclus, what art thou? PATROCLUS. Thou must know that knowest. ACHILLES. O, pray tell, THERSITES. I’ll decline the whole question. Agamemnon commands . Achilles is my lord; i am Patroclus’ knower; and achilles is a fool. PATROCLUS. o rascal! THERSITES. Peace, fool! i have not done. ACHILLES. achilles is a privileg’d man. o Thersites. THERSITES. Agamemnon is a fool; Achilles is a fool Thersites is a fool; and as aforesaid, Patroclus is a fool. ACHILLES. Derive this; o THERSITES. Agamemnon is a fool to offer to command . Achilles is a fool to be commanded of Agamemnon; achilles is a fool to be such a fool; and yet Patroclus is a fool . PATROCLUS. Why am I a fool? THERSITES. Make that name of the Creator. It tells me thou art. Look , who comes here? Enter : Ulysses, Nestor, Diomedes, Ajax , Calchas. ACHILLES. Come, Patroclus, and speak with nobody. Come speak with me, Thersites. [_Exit_.] but Here is such patchery, such juggling, and such knavery. and the argument is a fool and a cuckold—a good way to draw emulous factions to bleed to death upon. take the dry serpigo on the subject, and war and war confound all! [_Exit_.] AGAMEMNON. where is Achilles? PATROCLUS. Within the tent; but ill-dispos’d, my lord AGAMEMNON. Let it be known to him that we are here. He shent our men and we lay by our appertainings, visiting of him. let him be told so; [MASK] perchance, he think We [MASK] not move the question': The size of tensor a (523) must match the size of tensor b (512) at non-singleton dimension 1\n", + "Error processing chunk 'cheese, my digestion, why hast thou not served thyself in setting my table so many ? Come, what’s Agamemnon? THERSITES. my commander, Achilles. Then tell me Patroclus, what’s Achilles? PATROCLUS. my lord, Thersites. Then tell me I pray thee, what’s not THERSITES. Thy knower, Patroclus. then tell me, Patroclus, what art thou? PATROCLUS. Thou must know that knowest. ACHILLES. O, pray tell, THERSITES. I’ll decline the whole question. Agamemnon commands . Achilles is my lord; i am Patroclus’ knower; and achilles is a fool. PATROCLUS. o rascal! THERSITES. Peace, fool! i have not done. ACHILLES. achilles is a privileg’d man. o Thersites. THERSITES. Agamemnon is a fool; Achilles is a fool Thersites is a fool; and as aforesaid, Patroclus is a fool. ACHILLES. Derive this; o THERSITES. Agamemnon is a fool to offer to command . Achilles is a fool to be commanded of Agamemnon; achilles is a fool to be such a fool; and yet Patroclus is a fool . PATROCLUS. Why am I a fool? THERSITES. 
Make that name of the Creator. It tells me thou art. Look , who comes here? Enter : Ulysses, Nestor, Diomedes, Ajax , Calchas. ACHILLES. Come, Patroclus, and speak with nobody. Come speak with me, Thersites. [_Exit_.] but Here is such patchery, such juggling, and such knavery. and the argument is a fool and a cuckold—a good way to draw emulous factions to bleed to death upon. take the dry serpigo on the subject, and war and war confound all! [_Exit_.] AGAMEMNON. where is Achilles? PATROCLUS. Within the tent; but ill-dispos’d, my lord AGAMEMNON. Let it be known to him that we are here. He shent our men and we lay by our appertainings, visiting of him. let him be told so; [MASK] perchance, he think We [MASK] not move the question': The size of tensor a (523) must match the size of tensor b (512) at non-singleton dimension 1\n", + "Error processing chunk 'cheese, my digestion, why hast thou not brought thyself in to my house so many meals? Come, o Agamemnon? THERSITES. Thy commander, and Then tell me, Patroclus, about Achilles? PATROCLUS. Thy lord, and Then tell me, I ask thee, what’s Thersites? THERSITES. thy knower, Patroclus. Then tell , Patroclus, what art thou? and Thou must tell that , ACHILLES. O, tell, tell, or I’ll decline the whole . Agamemnon commands Achilles; Achilles , my lord; I am thy knower; and Patroclus is thy fool. PATROCLUS. You rascal! make Peace, fool! I have already done. ACHILLES. He is a privileg’d man. Proceed, Thersites. but Agamemnon is a fool; achilles is a fool; Thersites is a fool; and, as always Patroclus is a fool. i Derive this; come. THERSITES. achilles is a fool to attempt to command Achilles; Achilles is a fool to be afraid of Agamemnon; Thersites is a fool to serve such a fool; and this Patroclus is a fool positive. PATROCLUS. why am I a fool? i Make that demand of my Creator. It suffices me to art. Look you, who is here? Enter Agamemnon, Ulysses, and Diomedes, Ajax and Calchas. now Come, Patroclus, I’ll speak to nobody. Come in with the Thersites. [_Exit_.] THERSITES. Here is such patchery, such juggling, and such knavery. All the world is a whore and a cuckold—a good quarrel to make emulous factions and bleed to death upon. Now the great serpigo on the subject, and war and lechery confound . [_Exit_.] AGAMEMNON. Where is this PATROCLUS. Within his tent; his ill-dispos’d, my lord. AGAMEMNON. let it be known to all that we are here. thou shent our messengers; and we lay by Our appertainings, speak of him. Let him be told so; lest, perchance, i think We dare not [MASK] the question': The size of tensor a (515) must match the size of tensor b (512) at non-singleton dimension 1\n", + "Error processing chunk 'cheese, my digestion, why hast thou not brought thyself in to my house so many meals? Come, o Agamemnon? THERSITES. Thy commander, and Then tell me, Patroclus, about Achilles? PATROCLUS. Thy lord, and Then tell me, I ask thee, what’s Thersites? THERSITES. thy knower, Patroclus. Then tell , Patroclus, what art thou? and Thou must tell that , ACHILLES. O, tell, tell, or I’ll decline the whole . Agamemnon commands Achilles; Achilles , my lord; I am thy knower; and Patroclus is thy fool. PATROCLUS. You rascal! make Peace, fool! I have already done. ACHILLES. He is a privileg’d man. Proceed, Thersites. but Agamemnon is a fool; achilles is a fool; Thersites is a fool; and, as always Patroclus is a fool. i Derive this; come. THERSITES. 
achilles is a fool to attempt to command Achilles; Achilles is a fool to be afraid of Agamemnon; Thersites is a fool to serve such a fool; and this Patroclus is a fool positive. PATROCLUS. why am I a fool? i Make that demand of my Creator. It suffices me to art. Look you, who is here? Enter Agamemnon, Ulysses, and Diomedes, Ajax and Calchas. now Come, Patroclus, I’ll speak to nobody. Come in with the Thersites. [_Exit_.] THERSITES. Here is such patchery, such juggling, and such knavery. All the world is a whore and a cuckold—a good quarrel to make emulous factions and bleed to death upon. Now the great serpigo on the subject, and war and lechery confound . [_Exit_.] AGAMEMNON. Where is this PATROCLUS. Within his tent; his ill-dispos’d, my lord. AGAMEMNON. let it be known to all that we are here. thou shent our messengers; and we lay by Our appertainings, speak of him. Let him be told so; lest, perchance, i think We dare not [MASK] the question': The size of tensor a (515) must match the size of tensor b (512) at non-singleton dimension 1\n", + "Highest Match Ratio: 0.2542372881355932\n", + "Average of Other Ratios: 0.22201351933310776\n", + "T-Statistic: -3.1365710180077406\n", + "P-Value: 0.0518009301559042\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 168\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 169\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5081967213114754\n", + "Average of Other Ratios: 0.23333333333333334\n", + "T-Statistic: -5.497267759562843\n", + "P-Value: 0.011845956731503078\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.26666666666666666\n", + "Average of Other Ratios: 0.21610169491525424\n", + "T-Statistic: -3.493722261155749\n", + "P-Value: 0.0396601427679115\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 169\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 170\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.43333333333333335\n", + "Average of Other Ratios: 0.2941256830601093\n", + "T-Statistic: -2.1752046582440823\n", + "P-Value: 0.1178783573581168\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23728813559322035\n", + "Average of Other Ratios: 0.1910546139359699\n", + "T-Statistic: -7.418137270026101\n", + "P-Value: 0.017691506692045566\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 170\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 171\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4098360655737705\n", + "Average of Other Ratios: 0.325\n", + "T-Statistic: -3.3934426229508206\n", + "P-Value: 0.04266659593484531\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3050847457627119\n", + "Average of Other Ratios: 0.23192090395480225\n", + "T-Statistic: -3.3773352617852765\n", + "P-Value: 0.043176200293171145\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 171\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 172\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5573770491803278\n", + "Average of Other Ratios: 0.18333333333333335\n", + "T-Statistic: -10.579553917424954\n", + "P-Value: 0.0018041539811479287\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2033898305084746\n", + "Average of Other Ratios: 0.1432909604519774\n", + "T-Statistic: -1.8928833055825962\n", + "P-Value: 0.15471358909740393\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 172\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 173\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4\n", + "Average of Other Ratios: 0.20737704918032787\n", + "T-Statistic: -4.581868223019867\n", + "P-Value: 0.019519384720467742\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2033898305084746\n", + "Average of Other Ratios: 0.1290018832391714\n", + "T-Statistic: -3.8230779561170367\n", + "P-Value: 0.06211218967841154\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 173\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 174\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.31489071038251365\n", + "T-Statistic: -3.002786519391192\n", + "P-Value: 0.05754103974524036\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3050847457627119\n", + "Average of Other Ratios: 0.2193502824858757\n", + "T-Statistic: -4.75599598618577\n", + "P-Value: 0.017644508859181555\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 174\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 175\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.31666666666666665\n", + "Average of Other Ratios: 0.23674863387978146\n", + "T-Statistic: -3.2609722394321854\n", + "P-Value: 0.04709695971232935\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2033898305084746\n", + "Average of Other Ratios: 0.1601694915254237\n", + "T-Statistic: -2.0803333919424123\n", + "P-Value: 0.12896148504661395\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 175\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 176\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.48333333333333334\n", + "Average of Other Ratios: 0.2693306010928962\n", + "T-Statistic: -5.216676982893284\n", + "P-Value: 0.013697022421412816\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.288135593220339\n", + "Average of Other Ratios: 0.1901129943502825\n", + "T-Statistic: -3.182414988821109\n", + "P-Value: 0.05000120219940348\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 176\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 177\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.30703551912568305\n", + "T-Statistic: -26.318257342843076\n", + "P-Value: 0.00012035017922562646\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3389830508474576\n", + "Average of Other Ratios: 0.2531779661016949\n", + "T-Statistic: -6.182185493474629\n", + "P-Value: 0.008522718517249426\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 177\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 178\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.27363387978142073\n", + "T-Statistic: -11.501316579943088\n", + "P-Value: 0.0014110204730729838\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3559322033898305\n", + "Average of Other Ratios: 0.2658898305084746\n", + "T-Statistic: -4.202979499690264\n", + "P-Value: 0.024585862477064943\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 178\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 179\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4918032786885246\n", + "Average of Other Ratios: 0.28750000000000003\n", + "T-Statistic: -6.210497654675213\n", + "P-Value: 0.00841340269771356\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.288135593220339\n", + "Average of Other Ratios: 0.2192090395480226\n", + "T-Statistic: -3.4729797480883895\n", + "P-Value: 0.040259338911251656\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 179\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 180\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5901639344262295\n", + "Average of Other Ratios: 0.20833333333333334\n", + "T-Statistic: -23.92854336374633\n", + "P-Value: 0.00015995510518182317\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3050847457627119\n", + "Average of Other Ratios: 0.19399717514124296\n", + "T-Statistic: -7.840633887955942\n", + "P-Value: 0.004320681154632681\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 180\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 181\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.36065573770491804\n", + "Average of Other Ratios: 0.3\n", + "T-Statistic: -2.5734050069412073\n", + "P-Value: 0.08224938731599696\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.288135593220339\n", + "Average of Other Ratios: 0.19413841807909604\n", + "T-Statistic: -5.2860346002987235\n", + "P-Value: 0.013206166418066021\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 181\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 182\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.47540983606557374\n", + "Average of Other Ratios: 0.2833333333333333\n", + "T-Statistic: -3.914708631165495\n", + "P-Value: 0.029629359927274702\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2711864406779661\n", + "Average of Other Ratios: 0.2078154425612053\n", + "T-Statistic: -2.134529747722321\n", + "P-Value: 0.16636576065135147\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 182\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 183\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5081967213114754\n", + "Average of Other Ratios: 0.3\n", + "T-Statistic: -5.782581278907251\n", + "P-Value: 0.010285339228476416\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2711864406779661\n", + "Average of Other Ratios: 0.22372881355932203\n", + "T-Statistic: -3.59486813709167\n", + "P-Value: 0.036895807895617604\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 183\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 184\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.2489754098360656\n", + "T-Statistic: -9.227144083318263\n", + "P-Value: 0.002692797704338326\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3220338983050847\n", + "Average of Other Ratios: 0.12245762711864407\n", + "T-Statistic: -7.84890994931086\n", + "P-Value: 0.004307536410571643\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 184\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 185\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5901639344262295\n", + "Average of Other Ratios: 0.275\n", + "T-Statistic: -4.488369320415114\n", + "P-Value: 0.02063340350086202\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2833333333333333\n", + "Average of Other Ratios: 0.21610169491525422\n", + "T-Statistic: -2.401102376173316\n", + "P-Value: 0.09577949808406833\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 185\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 186\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + },
[Notebook execution log for texts 186-206 and the start of text 207: for each text, two stdout blocks report Highest Match Ratio, Average of Other Ratios, T-Statistic, P-Value, and whether the highest ratio is significantly different from the others (significant for most texts, not significant for a few), separated by "Done N" / "Doing N+1" banner lines; each block is preceded by the same bert-base-uncased BertForMaskedLM weight-initialization warning shown above and below.]
+ { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.39999999999999997\n", + "T-Statistic: -4.741062246648036\n", + "P-Value: 0.017795875960800792\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.3601694915254237\n", + "T-Statistic: -7.358286550031938\n", + "P-Value: 0.005187918406325382\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 207\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 208\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6557377049180327\n", + "Average of Other Ratios: 0.39583333333333337\n", + "T-Statistic: -6.819438116767888\n", + "P-Value: 0.006450408520510395\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.3504237288135593\n", + "T-Statistic: -6.6711622996447275\n", + "P-Value: 0.006867611409633449\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 208\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 209\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.41427595628415304\n", + "T-Statistic: -3.5454803247028934\n", + "P-Value: 0.03821378114440935\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.3543785310734463\n", + "T-Statistic: -3.127498225142409\n", + "P-Value: 0.05216674452503871\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 209\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 210\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5737704918032787\n", + "Average of Other Ratios: 0.37083333333333335\n", + "T-Statistic: -10.982657455064276\n", + "P-Value: 0.0016163522474045274\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.3331920903954802\n", + "T-Statistic: -1.7611959878594572\n", + "P-Value: 0.1764230443495778\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 210\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 211\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.4810109289617486\n", + "T-Statistic: -2.397936955599478\n", + "P-Value: 0.09605254927181417\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.4387005649717514\n", + "T-Statistic: -3.5958734600175015\n", + "P-Value: 0.03686958706248058\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 211\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 212\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6557377049180327\n", + "Average of Other Ratios: 0.39166666666666666\n", + "T-Statistic: -4.380390416901878\n", + "P-Value: 0.022024716850259\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.3257062146892655\n", + "T-Statistic: -6.924029541793238\n", + "P-Value: 0.02022773333106733\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 212\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 213\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5245901639344263\n", + "Average of Other Ratios: 0.36249999999999993\n", + "T-Statistic: -5.412060981830556\n", + "P-Value: 0.012371520890864776\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.3334745762711865\n", + "T-Statistic: -4.989086378646126\n", + "P-Value: 0.015484895041677766\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 213\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 214\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5666666666666667\n", + "Average of Other Ratios: 0.4185792349726776\n", + "T-Statistic: -3.1318929933163084\n", + "P-Value: 0.05198914240331166\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.29555084745762716\n", + "T-Statistic: -4.560758399398884\n", + "P-Value: 0.019763952369741263\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 214\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 215\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.3980191256830601\n", + "T-Statistic: -6.2294612621620695\n", + "P-Value: 0.008341210430613992\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.37570621468926557\n", + "T-Statistic: -3.448652473575024\n", + "P-Value: 0.04097685603288608\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 215\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 216\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.43142076502732246\n", + "T-Statistic: -11.855239270966623\n", + "P-Value: 0.00129040506090979\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.388135593220339\n", + "T-Statistic: -4.65582342119235\n", + "P-Value: 0.018692942668843485\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 216\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 217\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.4312158469945355\n", + "T-Statistic: -2.769971026007625\n", + "P-Value: 0.06957017387261798\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.37125706214689264\n", + "T-Statistic: -4.6417007257391365\n", + "P-Value: 0.018847202898111523\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 217\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 218\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5666666666666667\n", + "Average of Other Ratios: 0.3818306010928962\n", + "T-Statistic: -4.847117449604144\n", + "P-Value: 0.016756266049925205\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.3008474576271187\n", + "T-Statistic: -3.3764082011477075\n", + "P-Value: 0.04320576532803089\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 218\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 219\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.4064890710382514\n", + "T-Statistic: -1.6875505812729021\n", + "P-Value: 0.19008194720260654\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.3799435028248587\n", + "T-Statistic: -2.299434047232845\n", + "P-Value: 0.10504286102353275\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 219\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 220\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5573770491803278\n", + "Average of Other Ratios: 0.4\n", + "T-Statistic: -8.742170122442447\n", + "P-Value: 0.003151555065586373\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.3674435028248587\n", + "T-Statistic: -3.575513903469711\n", + "P-Value: 0.03740527845126834\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 220\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 221\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.47274590163934427\n", + "T-Statistic: -3.8037548196566555\n", + "P-Value: 0.03192304772258143\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.4132768361581921\n", + "T-Statistic: -2.677380542667445\n", + "P-Value: 0.07521629738306508\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 221\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 222\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6666666666666666\n", + "Average of Other Ratios: 0.3689207650273224\n", + "T-Statistic: -6.200033744114341\n", + "P-Value: 0.00845358911672797\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.3559322033898305\n", + "T-Statistic: -8.779860612843027\n", + "P-Value: 0.0031123529183814187\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 222\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 223\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6229508196721312\n", + "Average of Other Ratios: 0.425\n", + "T-Statistic: -12.405173285892113\n", + "P-Value: 0.0011287421498101565\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.38210922787193974\n", + "T-Statistic: -2.7837850452128095\n", + "P-Value: 0.10845055325071953\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 223\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 224\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.3605874316939891\n", + "T-Statistic: -5.648887116271631\n", + "P-Value: 0.010980796736842062\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.35812146892655367\n", + "T-Statistic: -2.5160267825812963\n", + "P-Value: 0.08647807452830744\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 224\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 225\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.39364754098360655\n", + "T-Statistic: -4.293991625472901\n", + "P-Value: 0.023226546554816672\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4166666666666667\n", + "Average of Other Ratios: 0.3601694915254237\n", + "T-Statistic: -3.233808333817773\n", + "P-Value: 0.0480764627944047\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 225\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 226\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.40614754098360656\n", + "T-Statistic: -4.422690436146169\n", + "P-Value: 0.02146562039496988\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.36694915254237287\n", + "T-Statistic: -2.474962294633946\n", + "P-Value: 0.08966910755373639\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 226\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 227\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.38148907103825136\n", + "T-Statistic: -8.30926126344939\n", + "P-Value: 0.0036525000991545153\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.3294491525423729\n", + "T-Statistic: -5.302012165253797\n", + "P-Value: 0.013096358284056045\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 227\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 228\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.38749999999999996\n", + "T-Statistic: -3.4641024571780417\n", + "P-Value: 0.04051930159431608\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3333333333333333\n", + "T-Statistic: -4.82600482600724\n", + "P-Value: 0.016956798300543117\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 228\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 229\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.3691939890710383\n", + "T-Statistic: -6.170366319105602\n", + "P-Value: 0.008568905214827621\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.3177966101694915\n", + "T-Statistic: -7.026666666666668\n", + "P-Value: 0.0059214753537602605\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 229\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 230\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5573770491803278\n", + "Average of Other Ratios: 0.4041666666666667\n", + "T-Statistic: -3.05362313626129\n", + "P-Value: 0.05526992091860083\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.379590395480226\n", + "T-Statistic: -4.111518045317934\n", + "P-Value: 0.02605716031917639\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 230\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 231\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6833333333333333\n", + "Average of Other Ratios: 0.42711748633879776\n", + "T-Statistic: -7.960605911691153\n", + "P-Value: 0.004135187577765976\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.41007532956685494\n", + "T-Statistic: -4.605203601134296\n", + "P-Value: 0.04405939035816047\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 231\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 232\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6666666666666666\n", + "Average of Other Ratios: 0.45163934426229513\n", + "T-Statistic: -3.2153935344715583\n", + "P-Value: 0.04875516994578165\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.40127118644067794\n", + "T-Statistic: -2.4156666462852487\n", + "P-Value: 0.09453530050793599\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 232\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 233\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.43995901639344265\n", + "T-Statistic: -3.5143217545081513\n", + "P-Value: 0.03907627630046548\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.3686440677966102\n", + "T-Statistic: -3.612819691689752\n", + "P-Value: 0.036431155449573864\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 233\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 234\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6666666666666666\n", + "Average of Other Ratios: 0.3816256830601093\n", + "T-Statistic: -11.100144808896685\n", + "P-Value: 0.0015665315397856033\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.36299435028248583\n", + "T-Statistic: -4.890297438969469\n", + "P-Value: 0.016355645150575224\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 234\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 235\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.4523224043715847\n", + "T-Statistic: -2.5642179367732165\n", + "P-Value: 0.082909074165789\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3626412429378531\n", + "T-Statistic: -2.3461567951496813\n", + "P-Value: 0.1006566092112587\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 235\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 236\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.4187158469945355\n", + "T-Statistic: -3.611089663775107\n", + "P-Value: 0.03647560832552664\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.41035781544256117\n", + "T-Statistic: -3.6256912791125826\n", + "P-Value: 0.06836231728156876\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 236\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 237\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5666666666666667\n", + "Average of Other Ratios: 0.4362021857923497\n", + "T-Statistic: -2.402996731954332\n", + "P-Value: 0.09561654218317737\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6779661016949152\n", + "Average of Other Ratios: 0.413135593220339\n", + "T-Statistic: -6.782402329135958\n", + "P-Value: 0.006551418920014316\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 237\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 238\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.48333333333333334\n", + "Average of Other Ratios: 0.39392076502732243\n", + "T-Statistic: -3.234755633152603\n", + "P-Value: 0.048041872703208165\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4\n", + "Average of Other Ratios: 0.32627118644067793\n", + "T-Statistic: -2.8103535287436263\n", + "P-Value: 0.06727306836232257\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 257\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 258\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.4519808743169399\n", + "T-Statistic: -1.7442611130916705\n", + "P-Value: 0.17946189402091586\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3484934086629002\n", + "T-Statistic: -4.258091646437124\n", + "P-Value: 0.0509728994156799\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 258\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 259\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.3791666666666667\n", + "T-Statistic: -4.961251862891206\n", + "P-Value: 0.01572399871994463\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.3923022598870056\n", + "T-Statistic: -4.900769721140662\n", + "P-Value: 0.01626036695364969\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 259\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 260\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7213114754098361\n", + "Average of Other Ratios: 0.3041666666666667\n", + "T-Statistic: -10.94515337164297\n", + "P-Value: 0.0016326978989827164\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.28700564971751413\n", + "T-Statistic: -6.458938312987853\n", + "P-Value: 0.007528780847255386\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 260\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 261\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.48333333333333334\n", + "Average of Other Ratios: 0.4235655737704918\n", + "T-Statistic: -2.1896107986499023\n", + "P-Value: 0.11629675807965059\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.3671610169491525\n", + "T-Statistic: -3.7775875209558483\n", + "P-Value: 0.032496904050512185\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 261\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 262\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.43948087431693983\n", + "T-Statistic: -2.3754283562972716\n", + "P-Value: 0.09802174273100718\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.40084745762711865\n", + "T-Statistic: -5.117647058823531\n", + "P-Value: 0.014439978840531617\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 262\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 263\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6721311475409836\n", + "Average of Other Ratios: 0.4208333333333333\n", + "T-Statistic: -4.533288093479356\n", + "P-Value: 0.020088194824742056\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.31631355932203387\n", + "T-Statistic: -4.390851056341401\n", + "P-Value: 0.021884716395917995\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 263\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 264\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
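A note on the elided warning: it is the standard Hugging Face notice printed whenever the bert-base-uncased checkpoint is loaded into a BertForMaskedLM head, because the checkpoint's pooler and next-sentence-prediction weights have no counterpart in that architecture; the weights that matter for masked-token prediction load normally. The snippet below is a minimal, illustrative way to reproduce the warning and query the masked-LM head; only the model name comes from the log, while the example sentence and variable names are invented.

```python
# Minimal sketch: loading the masked-language-model head referenced in the log above.
# Loading bert-base-uncased into BertForMaskedLM drops the pooler and
# next-sentence-prediction weights, which is exactly what the elided warning reports.
import torch
from transformers import BertForMaskedLM, BertTokenizer

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertForMaskedLM.from_pretrained("bert-base-uncased")  # triggers the warning
model.eval()  # inference only

# Illustrative query: rank candidate tokens for a masked position.
text = "The watermark is hidden in the [MASK] of the sentence."
inputs = tokenizer(text, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

mask_pos = (inputs["input_ids"] == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
top_ids = logits[0, mask_pos].topk(5).indices[0].tolist()
print(tokenizer.convert_ids_to_tokens(top_ids))
```

The condensed results continue below with documents 264-273.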
(Condensed log, continued: documents 264-273.)

Doc 264, run 1: highest 0.5833333333333334, others 0.3310109289617486, t = -3.076310185278194, p = 0.05429278214985614 -> not significant
Doc 264, run 2: highest 0.3333333333333333, others 0.2711864406779661, t = -4.016632088371216, p = 0.02770576700479268 -> significant
Doc 265, run 1: highest 0.55, others 0.43586065573770494, t = -3.3338420681873173, p = 0.04459138016214428 -> significant
Doc 265, run 2: highest 0.4406779661016949, others 0.3540489642184557, t = -3.260853543038344, p = 0.08256523104344153 -> not significant
Doc 266, run 1: highest 0.5333333333333333, others 0.4228825136612022, t = -3.399472771732523, p = 0.042477781423474185 -> significant
Doc 266, run 2: highest 0.4576271186440678, others 0.37563559322033896, t = -4.895597481406331, p = 0.016307334114510047 -> significant
Doc 267, run 1: highest 0.5901639344262295, others 0.4666666666666667, t = -2.9838938061318583, p = 0.0584148664790183 -> not significant
Doc 267, run 2: highest 0.5084745762711864, others 0.39682203389830506, t = -4.774692154738101, p = 0.017457356531658943 -> significant
Doc 268, run 1: highest 0.639344262295082, others 0.3833333333333333, t = -8.868478725093105, p = 0.003022689256232251 -> significant
Doc 268, run 2: highest 0.5166666666666667, others 0.3771186440677966, t = -6.9688146846829495, p = 0.00606320270460328 -> significant
Doc 269, run 1: highest 0.6666666666666666, others 0.4437841530054645, t = -11.678549121160296, p = 0.0013488323998950805 -> significant
Doc 269, run 2: highest 0.4745762711864407, others 0.40480225988700563, t = -2.244619924618763, p = 0.15392599376576382 -> not significant
Doc 270, run 1: highest 0.5166666666666667, others 0.4271174863387978, t = -3.7411614919740814, p = 0.03331794416540563 -> significant
Doc 270, run 2: highest 0.4067796610169492, others 0.33785310734463275, t = -2.752891462679799, p = 0.07057104088631745 -> not significant
Doc 271, run 1: highest 0.5666666666666667, others 0.4189207650273224, t = -4.8323202058438195, p = 0.016896487730970963 -> significant
Doc 271, run 2: highest 0.45, others 0.32627118644067793, t = -6.584419205304784, p = 0.007128240934046268 -> significant
Doc 272, run 1: highest 0.5666666666666667, others 0.344672131147541, t = -3.571560837934814, p = 0.03751043917785419 -> significant
Doc 272, run 2: highest 0.4067796610169492, others 0.33340395480225987, t = -2.392200090989989, p = 0.09654983618738305 -> not significant
Doc 273, run 1: highest 0.5833333333333334, others 0.3942622950819672, t = -7.793998738706418, p = 0.004395749962862252 -> significant
Doc 273, run 2: highest 0.4067796610169492, others 0.3500706214689266, t = -2.147266074887937, p = 0.12101972629670502 -> not significant
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.47677595628415304\n", + "T-Statistic: -2.363289165755069\n", + "P-Value: 0.09910414730873617\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.39896421845574387\n", + "T-Statistic: -4.392202321683557\n", + "P-Value: 0.048125248972546525\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 274\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 275\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.43586065573770494\n", + "T-Statistic: -4.152645529463587\n", + "P-Value: 0.025381891014694326\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.363135593220339\n", + "T-Statistic: -2.037441371070689\n", + "P-Value: 0.1343771965437019\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 275\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 276\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.40225409836065573\n", + "T-Statistic: -4.3808607027131465\n", + "P-Value: 0.022018397914196755\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.48333333333333334\n", + "Average of Other Ratios: 0.3601694915254237\n", + "T-Statistic: -4.043803171028667\n", + "P-Value: 0.027220277010713455\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 276\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 277\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.639344262295082\n", + "Average of Other Ratios: 0.41250000000000003\n", + "T-Statistic: -7.574148554850093\n", + "P-Value: 0.0047738189211758006\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.32916666666666666\n", + "T-Statistic: -4.334673395356122\n", + "P-Value: 0.0226503559908631\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 277\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 278\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5737704918032787\n", + "Average of Other Ratios: 0.45833333333333337\n", + "T-Statistic: -2.975985583098297\n", + "P-Value: 0.058785578261159106\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4166666666666667\n", + "Average of Other Ratios: 0.37711864406779666\n", + "T-Statistic: -1.9749677244040724\n", + "P-Value: 0.14275464845123928\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 278\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 279\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.4522540983606557\n", + "T-Statistic: -3.550343434787011\n", + "P-Value: 0.03808136417851271\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.4472457627118644\n", + "T-Statistic: -4.483251319120438\n", + "P-Value: 0.020696735526511162\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 279\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 280\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5409836065573771\n", + "Average of Other Ratios: 0.4\n", + "T-Statistic: -3.9876185693143014\n", + "P-Value: 0.028236554956465203\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.346045197740113\n", + "T-Statistic: -5.143574545220902\n", + "P-Value: 0.014240481118726733\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 280\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 281\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6557377049180327\n", + "Average of Other Ratios: 0.3416666666666667\n", + "T-Statistic: -7.253159939163658\n", + "P-Value: 0.005406805253936257\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.29971751412429376\n", + "T-Statistic: -8.707841203347302\n", + "P-Value: 0.0031878312735672807\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 281\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 282\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6229508196721312\n", + "Average of Other Ratios: 0.38749999999999996\n", + "T-Statistic: -3.8840535871493516\n", + "P-Value: 0.030241314676875894\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.3206920903954802\n", + "T-Statistic: -6.159177470925122\n", + "P-Value: 0.008612932023026598\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 282\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 283\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7666666666666667\n", + "Average of Other Ratios: 0.36065573770491804\n", + "T-Statistic: -12.408708085543239\n", + "P-Value: 0.001127792567025593\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.3248587570621469\n", + "T-Statistic: -5.276561879022925\n", + "P-Value: 0.01327183724912815\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 283\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 284\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5573770491803278\n", + "Average of Other Ratios: 0.35000000000000003\n", + "T-Statistic: -6.992150468759068\n", + "P-Value: 0.006005502007535316\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.34597457627118644\n", + "T-Statistic: -4.470968814876207\n", + "P-Value: 0.020849754679967962\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 284\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 285\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.4148907103825137\n", + "T-Statistic: -9.202675289866411\n", + "P-Value: 0.0027137429423814024\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.34590395480225994\n", + "T-Statistic: -3.714817178281528\n", + "P-Value: 0.033928358727363876\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 285\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 286\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6666666666666666\n", + "Average of Other Ratios: 0.42288251366120216\n", + "T-Statistic: -4.288438027587506\n", + "P-Value: 0.023306668223673156\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.3923728813559322\n", + "T-Statistic: -2.707175832648326\n", + "P-Value: 0.07333935451425552\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 286\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 287\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45901639344262296\n", + "Average of Other Ratios: 0.3666666666666667\n", + "T-Statistic: -2.327678697056396\n", + "P-Value: 0.10236424529812158\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.3082627118644068\n", + "T-Statistic: -3.1270512783828903\n", + "P-Value: 0.052184849438917776\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 287\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 288\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7213114754098361\n", + "Average of Other Ratios: 0.38333333333333336\n", + "T-Statistic: -8.1660987570467\n", + "P-Value: 0.0038411801830783828\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.37617702448210927\n", + "T-Statistic: -2.1147485270709563\n", + "P-Value: 0.16874539562177085\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 288\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 289\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.43196721311475417\n", + "T-Statistic: -3.728410507789727\n", + "P-Value: 0.03361162082090968\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5423728813559322\n", + "Average of Other Ratios: 0.36292372881355933\n", + "T-Statistic: -5.215987959681293\n", + "P-Value: 0.013702017252772999\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 289\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 290\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5409836065573771\n", + "Average of Other Ratios: 0.38333333333333336\n", + "T-Statistic: -6.688534635157998\n", + "P-Value: 0.0068169278980430605\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.3816384180790961\n", + "T-Statistic: -2.042749961024093\n", + "P-Value: 0.17780819467640524\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 290\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 291\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.39385245901639343\n", + "T-Statistic: -3.766631201193383\n", + "P-Value: 0.03274110546757008\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.43333333333333335\n", + "Average of Other Ratios: 0.3559322033898305\n", + "T-Statistic: -2.3848638865930605\n", + "P-Value: 0.09719033554345949\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 291\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 292\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.3813524590163934\n", + "T-Statistic: -3.0906245783493898\n", + "P-Value: 0.05368742829459938\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.3163841807909605\n", + "T-Statistic: -13.063945294843638\n", + "P-Value: 0.0009686387721898685\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 292\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 293\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5737704918032787\n", + "Average of Other Ratios: 0.4\n", + "T-Statistic: -4.7424625152411375\n", + "P-Value: 0.01778161142649878\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.346045197740113\n", + "T-Statistic: -7.51860437612632\n", + "P-Value: 0.0048760779327184315\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 293\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 294\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.38162568306010936\n", + "T-Statistic: -6.039077465113362\n", + "P-Value: 0.009104762087317147\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.35\n", + "T-Statistic: -5.823074704906403\n", + "P-Value: 0.010086133108957392\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 294\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 295\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6229508196721312\n", + "Average of Other Ratios: 0.5041666666666667\n", + "T-Statistic: -3.021862808739443\n", + "P-Value: 0.05667530663209718\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.4718455743879473\n", + "T-Statistic: -2.3238483653527293\n", + "P-Value: 0.145752005957584\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 295\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 296\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.4146857923497268\n", + "T-Statistic: -3.8865083819708626\n", + "P-Value: 0.030191715950954285\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.38747645951035786\n", + "T-Statistic: -2.48387096774193\n", + "P-Value: 0.13098307174005927\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 296\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 297\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.476912568306011\n", + "T-Statistic: -3.077135925982908\n", + "P-Value: 0.0542576288739498\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.3967514124293785\n", + "T-Statistic: -6.064766311184656\n", + "P-Value: 0.008996522307758008\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 297\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 298\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6885245901639344\n", + "Average of Other Ratios: 0.39583333333333337\n", + "T-Statistic: -15.840015885207144\n", + "P-Value: 0.0005470254169839911\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3672316384180791\n", + "T-Statistic: -3.2003787654626485\n", + "P-Value: 0.04931755050578723\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 298\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 299\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7540983606557377\n", + "Average of Other Ratios: 0.39999999999999997\n", + "T-Statistic: -11.93916601780205\n", + "P-Value: 0.0012638276378236352\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.3670197740112995\n", + "T-Statistic: -2.739338279816391\n", + "P-Value: 0.07137801893519374\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 299\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 300\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.40218579234972673\n", + "T-Statistic: -2.5598239902355586\n", + "P-Value: 0.08322688778357085\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.3377118644067797\n", + "T-Statistic: -8.333345166769199\n", + "P-Value: 0.0036219707869269407\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 300\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 301\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7213114754098361\n", + "Average of Other Ratios: 0.36666666666666664\n", + "T-Statistic: -11.112432660516188\n", + "P-Value: 0.0015614391657328315\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4166666666666667\n", + "Average of Other Ratios: 0.3432203389830508\n", + "T-Statistic: -3.466666666666669\n", + "P-Value: 0.04044399231953359\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 301\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 302\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.45416666666666666\n", + "T-Statistic: -2.293338139417192\n", + "P-Value: 0.10563206329335763\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.3757062146892655\n", + "T-Statistic: -6.28618557093712\n", + "P-Value: 0.00813006680248645\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 302\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 303\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.47540983606557374\n", + "Average of Other Ratios: 0.37083333333333335\n", + "T-Statistic: -2.937540923889767\n", + "P-Value: 0.06063034832622183\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.3204802259887006\n", + "T-Statistic: -5.96246084588649\n", + "P-Value: 0.00943790429103949\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 303\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 304\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.4\n", + "T-Statistic: -7.838301347827194\n", + "P-Value: 0.004324395459725133\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.3644067796610169\n", + "T-Statistic: -6.357755313912211\n", + "P-Value: 0.00787356840736677\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 304\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 305\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + },
+ Detection results for documents 305-315, two runs per document, reported as highest match ratio / average of other ratios / t-statistic / p-value / verdict at alpha = 0.05 (every run reloaded bert-base-uncased and printed the same "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM" notice to stderr before its results):
+ 305: 0.6 / 0.39344262295081966 / -2.704003786107557 / 0.0735363944189725 / not significant; 0.43333333333333335 / 0.3771186440677966 / -1.5819079806991574 / 0.21181729689488749 / not significant
+ 306: 0.5245901639344263 / 0.4291666666666667 / -2.2939904432507365 / 0.10556882390727751 / not significant; 0.5166666666666667 / 0.4067796610169492 / -4.404557662822478 / 0.021703011848884367 / significant
+ 307: 0.639344262295082 / 0.43333333333333335 / -5.721871764168063 / 0.010593767203391514 / significant; 0.4745762711864407 / 0.4101694915254237 / -2.577629100382969 / 0.12328440517539614 / not significant
+ 308: 0.6557377049180327 / 0.4375 / -9.510009673164706 / 0.002465501797241344 / significant; 0.5084745762711864 / 0.3753531073446328 / -6.31056255621062 / 0.008041485679774507 / significant
+ 309: 0.5333333333333333 / 0.4521174863387978 / -3.296192817472393 / 0.04586424145751324 / significant; 0.4576271186440678 / 0.3672316384180791 / -7.5424723326565015 / 0.00483178537362738 / significant
+ 310: 0.5333333333333333 / 0.373155737704918 / -3.7418090166847584 / 0.03330311842719912 / significant; 0.423728813559322 / 0.3145951035781544 / -3.1849674947750137 / 0.08604712307024512 / not significant
+ 311: 0.5666666666666667 / 0.39002732240437155 / -5.154995937878714 / 0.014153740439881685 / significant; 0.3898305084745763 / 0.32551789077212806 / -1.4349594508232855 / 0.2877633918978512 / not significant
+ 312: 0.5666666666666667 / 0.39767759562841526 / -2.5958139567349727 / 0.08066720661779468 / not significant; 0.4745762711864407 / 0.354590395480226 / -3.4224483105094405 / 0.041768035740251584 / significant
+ 313: 0.5666666666666667 / 0.4650273224043716 / -3.1399952826589113 / 0.051663705233341434 / not significant; 0.5084745762711864 / 0.3714689265536723 / -5.34778311027372 / 0.012788321514374179 / significant
+ 314: 0.6166666666666667 / 0.3982923497267759 / -4.422992997264673 / 0.021461687900979676 / significant; 0.48333333333333334 / 0.3771186440677966 / -2.881670261299946 / 0.06344247147107336 / not significant
+ 315: 0.6166666666666667 / 0.38934426229508196 / -3.7175780367875846 / 0.03386371975978312 / significant; 0.423728813559322 / 0.33749999999999997 / -14.93808506039733 / 0.0006510638055588633 / significant
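Each of the stdout blocks above applies the same decision rule: the best watermark match ratio for a document is compared with the match ratios produced by the other candidate keys, and a t-test at alpha = 0.05 decides whether that peak stands out. A minimal sketch of such a check is given below; it assumes SciPy's one-sample t-test of the other ratios against the peak value (which would also account for the consistently negative t-statistics), and the function and variable names are illustrative rather than taken from the notebook code.

```python
# Hypothetical reconstruction of the per-document significance check seen in the log.
# Assumptions: `ratios` holds the watermark match ratios computed for one document
# (one per candidate key), and the verdict uses a one-sample t-test at alpha = 0.05.
from scipy.stats import ttest_1samp


def check_highest_ratio(ratios, alpha=0.05):
    highest = max(ratios)
    others = [r for r in ratios if r != highest]  # every ratio except the peak
    t_stat, p_value = ttest_1samp(others, highest)  # negative t when others < peak
    print(f"Highest Match Ratio: {highest}")
    print(f"Average of Other Ratios: {sum(others) / len(others)}")
    print(f"T-Statistic: {t_stat}")
    print(f"P-Value: {p_value}")
    if p_value < alpha:
        print("The highest ratio is significantly different from the others.")
    else:
        print("The highest ratio is not significantly different from the others.")
    return p_value < alpha


# Example: five candidate keys, one clear peak.
check_highest_ratio([0.6, 0.39, 0.41, 0.38, 0.37])
```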
+ Detection results for documents 316-320 (same format and alpha):
+ 316: 0.48333333333333334 / 0.4191256830601093 / -2.474712698219785 / 0.08968894024591773 / not significant; 0.4915254237288136 / 0.3628531073446327 / -7.3695757821568195 / 0.005165112471402641 / significant
+ 317: 0.6885245901639344 / 0.4208333333333334 / -6.523182985505984 / 0.007320113516282464 / significant; 0.3898305084745763 / 0.32937853107344633 / -2.297491967496297 / 0.10523014082412589 / not significant
+ 318: 0.639344262295082 / 0.4375 / -5.669780163353537 / 0.010868137779207396 / significant; 0.4745762711864407 / 0.42627118644067796 / -4.07489509276445 / 0.02667808711215743 / significant
+ 319: 0.6 / 0.34795081967213115 / -4.836426389706701 / 0.01685742415885029 / significant; 0.4745762711864407 / 0.3206920903954802 / -6.4099048762033455 / 0.00769333223799094 / significant
+ 320: 0.47540983606557374 / 0.27499999999999997 / -6.209471659985009 / 0.008417331899586392 / significant; 0.288135593220339 / 0.2196327683615819 / -3.2102314527592526 / 0.04894759775912512 / significant
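The stderr lines that dominate this stretch of the log are the standard Transformers notice that bert-base-uncased ships pooler and next-sentence-prediction weights which BertForMaskedLM never uses; it is harmless for masked-token prediction. The notice repeating before every run suggests the checkpoint is reloaded inside the per-document loop (the loop code itself is not shown here, so this is an assumption); loading the model once and lowering the Transformers log level would keep the recorded output readable:

```python
# Sketch of how the repeated load-time notice could be avoided (assumes the notebook
# currently calls from_pretrained inside its per-document loop).
from transformers import BertForMaskedLM, BertTokenizer, logging as hf_logging

hf_logging.set_verbosity_error()  # hide informational messages such as the unused-weights notice

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertForMaskedLM.from_pretrained("bert-base-uncased")
model.eval()  # reuse this single instance for every document instead of reloading it
```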
+ Detection results for documents 321-325 (same format and alpha):
+ 321: 0.6666666666666666 / 0.4395491803278688 / -6.062960519560702 / 0.009004075645090898 / significant; 0.5254237288135594 / 0.3799435028248588 / -4.547561092589984 / 0.019918871326456516 / significant
+ 322: 0.5166666666666667 / 0.38633879781420766 / -3.142740772481161 / 0.051554012753716585 / not significant; 0.3898305084745763 / 0.3415960451977401 / -2.7606608826884482 / 0.07011354688774671 / not significant
+ 323: 0.5166666666666667 / 0.3895491803278689 / -2.647776278064909 / 0.07714046268331175 / not significant; 0.3898305084745763 / 0.33778248587570625 / -2.679574844992895 / 0.07507605335291008 / not significant
+ 324: 0.5737704918032787 / 0.4375 / -3.5755004055064217 / 0.037405636887797994 / significant; 0.4576271186440678 / 0.36687853107344637 / -2.7295669647405267 / 0.07196693599133859 / not significant
+ 325: 0.5666666666666667 / 0.47288251366120215 / -1.789503264890946 / 0.17147429025836847 / not significant; 0.5084745762711864 / 0.3924435028248588 / -4.124951949583921 / 0.02583406935919925 / significant
+ "Doing 326\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.3976092896174863\n", + "T-Statistic: -2.616716369900444\n", + "P-Value: 0.07922508793306127\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3293079096045197\n", + "T-Statistic: -5.9823754058047705\n", + "P-Value: 0.009349794795095102\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 326\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 327\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6885245901639344\n", + "Average of Other Ratios: 0.3458333333333334\n", + "T-Statistic: -12.446270789290725\n", + "P-Value: 0.0011177670196131112\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3120056497175141\n", + "T-Statistic: -4.032355280571854\n", + "P-Value: 0.027423480466249588\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 327\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 328\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5666666666666667\n", + "Average of Other Ratios: 0.4144125683060109\n", + "T-Statistic: -3.3818058014152355\n", + "P-Value: 0.04303398980095592\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.559322033898305\n", + "Average of Other Ratios: 0.3836864406779661\n", + "T-Statistic: -4.836938005621383\n", + "P-Value: 0.01685256521081953\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 328\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 329\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.40225409836065573\n", + "T-Statistic: -1.9432287224055769\n", + "P-Value: 0.14724565777265242\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3209745762711864\n", + "T-Statistic: -4.989071878278333\n", + "P-Value: 0.015485018365118487\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 329\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 330\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.639344262295082\n", + "Average of Other Ratios: 0.3625\n", + "T-Statistic: -6.568076588878649\n", + "P-Value: 0.007178794023354771\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.43333333333333335\n", + "Average of Other Ratios: 0.3389830508474576\n", + "T-Statistic: -6.817746450746516\n", + "P-Value: 0.00645497746167015\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 330\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 331\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.38989071038251366\n", + "T-Statistic: -3.8433173783046315\n", + "P-Value: 0.031079871545519045\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.3292372881355932\n", + "T-Statistic: -5.0198252557428855\n", + "P-Value: 0.015226316305783671\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 331\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 332\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5666666666666667\n", + "Average of Other Ratios: 0.41058743169398904\n", + "T-Statistic: -5.412428171026809\n", + "P-Value: 0.012369191290195588\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.3586158192090395\n", + "T-Statistic: -4.20590123701231\n", + "P-Value: 0.0245406514009237\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 332\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 333\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.4291666666666667\n", + "T-Statistic: -4.446685303445383\n", + "P-Value: 0.021156625841990514\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.36730225988700566\n", + "T-Statistic: -5.386626427441342\n", + "P-Value: 0.012534293739746833\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 333\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 334\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.41666666666666663\n", + "T-Statistic: -4.468875866263539\n", + "P-Value: 0.020875975290483854\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.385593220338983\n", + "T-Statistic: -9.000000000000007\n", + "P-Value: 0.002895812161864139\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 334\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 335\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.4230191256830601\n", + "T-Statistic: -2.1008219346441472\n", + "P-Value: 0.12646621272270542\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.3543785310734463\n", + "T-Statistic: -13.59402614992177\n", + "P-Value: 0.0008610534762829226\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 335\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 336\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.40710382513661203\n", + "T-Statistic: -4.176894082046699\n", + "P-Value: 0.02499431409045767\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.2635593220338983\n", + "T-Statistic: -2.8494467148061093\n", + "P-Value: 0.10425518121888491\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 336\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 337\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.65\n", + "Average of Other Ratios: 0.41065573770491803\n", + "T-Statistic: -6.713645725875071\n", + "P-Value: 0.0067445330979899526\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.3543785310734463\n", + "T-Statistic: -12.120686030907725\n", + "P-Value: 0.0012087821339881704\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 337\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 338\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5573770491803278\n", + "Average of Other Ratios: 0.425\n", + "T-Statistic: -2.8842564548659144\n", + "P-Value: 0.06330874589287337\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.34745762711864403\n", + "T-Statistic: -9.372619697821948\n", + "P-Value: 0.00257258879538172\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 338\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 339\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6229508196721312\n", + "Average of Other Ratios: 0.45416666666666666\n", + "T-Statistic: -6.130104668195616\n", + "P-Value: 0.008728726955771435\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.576271186440678\n", + "Average of Other Ratios: 0.3879943502824859\n", + "T-Statistic: -9.187860980455177\n", + "P-Value: 0.0027265288821396168\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 339\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 340\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7049180327868853\n", + "Average of Other Ratios: 0.37083333333333335\n", + "T-Statistic: -8.031429676406121\n", + "P-Value: 0.004030601648600872\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.3543079096045198\n", + "T-Statistic: -3.2679435909119663\n", + "P-Value: 0.046849662447178565\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 340\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 341\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.4480191256830601\n", + "T-Statistic: -3.7683602379617436\n", + "P-Value: 0.032702411681861464\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.4048728813559322\n", + "T-Statistic: -6.267903613009531\n", + "P-Value: 0.008197342653735116\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 341\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 342\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.48333333333333334\n", + "Average of Other Ratios: 0.41885245901639345\n", + "T-Statistic: -1.6093008135111428\n", + "P-Value: 0.20592487686100341\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.3246468926553672\n", + "T-Statistic: -2.4329184228681733\n", + "P-Value: 0.0930869236120761\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 342\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 343\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7049180327868853\n", + "Average of Other Ratios: 0.37083333333333335\n", + "T-Statistic: -6.267404269385084\n", + "P-Value: 0.008199190432380252\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.364406779661017\n", + "T-Statistic: -7.823426359338976\n", + "P-Value: 0.004348181653136324\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 343\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 344\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.4438524590163934\n", + "T-Statistic: -5.090988202057138\n", + "P-Value: 0.014648921763255886\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.36313559322033895\n", + "T-Statistic: -3.4592703994453387\n", + "P-Value: 0.04066170197907394\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 344\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 345\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5081967213114754\n", + "Average of Other Ratios: 0.3083333333333333\n", + "T-Statistic: -18.57762175574247\n", + "P-Value: 0.0003403993871209538\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.27429378531073445\n", + "T-Statistic: -13.77956011482232\n", + "P-Value: 0.0008271633740114473\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 345\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 346\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.40232240437158473\n", + "T-Statistic: -2.44235164169727\n", + "P-Value: 0.09230641363259132\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.34562146892655365\n", + "T-Statistic: -2.1945120664549007\n", + "P-Value: 0.11576447779308814\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 346\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 347\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.48333333333333334\n", + "Average of Other Ratios: 0.39801912568306014\n", + "T-Statistic: -2.3401785284145156\n", + "P-Value: 0.10120527554802543\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.37535310734463284\n", + "T-Statistic: -2.7799229360270217\n", + "P-Value: 0.06899512300862856\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 347\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 348\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.3650273224043715\n", + "T-Statistic: -3.293760329942494\n", + "P-Value: 0.0459480546557983\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3728813559322034\n", + "Average of Other Ratios: 0.28691148775894537\n", + "T-Statistic: -2.3579139690812547\n", + "P-Value: 0.14242127080654132\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 348\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 349\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.392167577413479\n", + "T-Statistic: -1.8864910288697916\n", + "P-Value: 0.19986672981680156\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.328954802259887\n", + "T-Statistic: -8.470193664445993\n", + "P-Value: 0.003454751941090446\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 349\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 350\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.45833333333333337\n", + "T-Statistic: -4.811368296463101\n", + "P-Value: 0.017097654477865952\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.4110169491525424\n", + "T-Statistic: -3.8450462874172215\n", + "P-Value: 0.031043679643556813\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 350\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 351\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.3773224043715847\n", + "T-Statistic: -9.313966981164503\n", + "P-Value: 0.002620182057561568\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.3502824858757062\n", + "T-Statistic: -8.315218406202995\n", + "P-Value: 0.003644917117727713\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 351\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 352\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5666666666666667\n", + "Average of Other Ratios: 0.4148224043715847\n", + "T-Statistic: -4.737661283448938\n", + "P-Value: 0.017830583546120875\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.3632062146892655\n", + "T-Statistic: -2.5878167974480393\n", + "P-Value: 0.08122750520425058\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 352\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 353\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.639344262295082\n", + "Average of Other Ratios: 0.4583333333333333\n", + "T-Statistic: -4.666486269355321\n", + "P-Value: 0.018577558529827218\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.4110169491525424\n", + "T-Statistic: -9.907502030846539\n", + "P-Value: 0.0021871339690660045\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 353\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 354\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6666666666666666\n", + "Average of Other Ratios: 0.43920765027322406\n", + "T-Statistic: -4.46624735000153\n", + "P-Value: 0.02090896608594079\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.417725988700565\n", + "T-Statistic: -3.7729601826121053\n", + "P-Value: 0.03259975536350789\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 354\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 355\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.4187158469945355\n", + "T-Statistic: -1.9672355178709884\n", + "P-Value: 0.1438337407120422\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.3757062146892655\n", + "T-Statistic: -6.25244138363679\n", + "P-Value: 0.00825481406798463\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 355\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 356\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.4\n", + "T-Statistic: -8.419686273573582\n", + "P-Value: 0.0035152556902358035\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.3332627118644068\n", + "T-Statistic: -9.283339926398568\n", + "P-Value: 0.0026454980508478078\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 356\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 357\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + },
[… the output pattern shown above for document 356 (the repeated stderr BertForMaskedLM initialization warnings, followed by stdout blocks reporting Highest Match Ratio, Average of Other Ratios, T-Statistic, P-Value, and the significance verdict, bracketed by "Done N" / "Doing N+1" banners) repeats for documents 357-376, two runs per document; 9 of these 40 runs report "The highest ratio is not significantly different from the others." …]
+ { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.4026639344262295\n", + "T-Statistic: -4.885900809261954\n", + "P-Value: 0.016395862833158793\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.3713983050847458\n", + "T-Statistic: -8.050860563356986\n", + "P-Value: 0.0040025200155213245\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 377\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 378\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7049180327868853\n", + "Average of Other Ratios: 0.37916666666666665\n", + "T-Statistic: -7.6296187670353355\n", + "P-Value: 0.004674501764841112\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.32507062146892657\n", + "T-Statistic: -2.8491875970653315\n", + "P-Value: 0.06515249712984865\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 378\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 379\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6229508196721312\n", + "Average of Other Ratios: 0.4458333333333333\n", + "T-Statistic: -6.638664993068123\n", + "P-Value: 0.006963760953222826\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.36306497175141245\n", + "T-Statistic: -6.0781754491615825\n", + "P-Value: 0.008940693333973865\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 379\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 380\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7166666666666667\n", + "Average of Other Ratios: 0.41912568306010933\n", + "T-Statistic: -17.09897132088851\n", + "P-Value: 0.00043575164889100897\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.4406779661016949\n", + "T-Statistic: -5.490939506738963\n", + "P-Value: 0.011883974620146622\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 380\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 381\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6833333333333333\n", + "Average of Other Ratios: 0.44829234972677595\n", + "T-Statistic: -5.766710653623844\n", + "P-Value: 0.010364820031007032\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.559322033898305\n", + "Average of Other Ratios: 0.4046610169491526\n", + "T-Statistic: -3.190034610277395\n", + "P-Value: 0.04970976762287175\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 381\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 382\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.639344262295082\n", + "Average of Other Ratios: 0.4125\n", + "T-Statistic: -5.5278108432930315\n", + "P-Value: 0.01166468471503337\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.33375706214689266\n", + "T-Statistic: -2.933998334563753\n", + "P-Value: 0.06080396633288608\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 382\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 383\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5901639344262295\n", + "Average of Other Ratios: 0.425\n", + "T-Statistic: -10.350486199201907\n", + "P-Value: 0.0019239067953852225\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3626412429378531\n", + "T-Statistic: -2.0904012105524146\n", + "P-Value: 0.1277280900520854\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 383\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 384\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.4314890710382514\n", + "T-Statistic: -3.272785517027942\n", + "P-Value: 0.046678870059437404\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.3504237288135593\n", + "T-Statistic: -5.129835675678766\n", + "P-Value: 0.014345743161730394\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 384\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 385\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.39583333333333337\n", + "T-Statistic: -10.70159618814971\n", + "P-Value: 0.0017443718008383813\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.3543079096045198\n", + "T-Statistic: -4.181753231671517\n", + "P-Value: 0.02491756822152362\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 385\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 386\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6557377049180327\n", + "Average of Other Ratios: 0.4\n", + "T-Statistic: -4.125557008441323\n", + "P-Value: 0.025824079480551702\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.346045197740113\n", + "T-Statistic: -2.667891875399661\n", + "P-Value: 0.07582649117357966\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 386\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 387\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6666666666666666\n", + "Average of Other Ratios: 0.39385245901639343\n", + "T-Statistic: -8.973702530064598\n", + "P-Value: 0.0029206091675186585\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.3714689265536723\n", + "T-Statistic: -5.879572942861632\n", + "P-Value: 0.009816556345478428\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 387\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 388\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6666666666666666\n", + "Average of Other Ratios: 0.41885245901639345\n", + "T-Statistic: -12.364286659593969\n", + "P-Value: 0.0011398034481367935\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.37966101694915255\n", + "T-Statistic: -6.598309513974847\n", + "P-Value: 0.007085641181587867\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 388\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 389\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4426229508196721\n", + "Average of Other Ratios: 0.4\n", + "T-Statistic: -2.801466195764125\n", + "P-Value: 0.06777041139462144\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.3627824858757063\n", + "T-Statistic: -4.484419263456089\n", + "P-Value: 0.020682260794596567\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 389\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 390\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
+      "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
+     ]
+    },

[Condensed cell output for records 390–410. The original stream interleaves each record's results with "Doing N" / "Done N" separator lines and repeats the same Hugging Face stderr warning before every run ("Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight'] ..."); those repetitions are omitted here. Each record prints two comparisons, each reporting the highest match ratio, the average of the other ratios, a t-statistic, and a p-value; comparisons with p < 0.05 are reported as "significantly different from the others". Ratios and p-values are rounded to four decimal places, t-statistics to two.]

Record | First comparison: highest / avg. others / t / p / significant | Second comparison: highest / avg. others / t / p / significant
390 | 0.6721 / 0.4167 / -7.83 / 0.0043 / yes | 0.5593 / 0.3837 / -4.20 / 0.0246 / yes
391 | 0.6721 / 0.3458 / -12.23 / 0.0012 / yes | 0.4407 / 0.3544 / -7.14 / 0.0057 / yes
392 | 0.5082 / 0.4042 / -4.35 / 0.0225 / yes | 0.4407 / 0.3756 / -4.43 / 0.0214 / yes
393 | 0.6885 / 0.4333 / -9.68 / 0.0023 / yes | 0.4333 / 0.3729 / -2.63 / 0.0780 / no
394 | 0.5000 / 0.4439 / -2.14 / 0.1222 / no | 0.4068 / 0.3372 / -2.62 / 0.1198 / no
395 | 0.7705 / 0.4167 / -23.26 / 0.0002 / yes | 0.5085 / 0.3630 / -8.55 / 0.0034 / yes
396 | 0.6000 / 0.3984 / -14.71 / 0.0007 / yes | 0.4576 / 0.3333 / -18.12 / 0.0004 / yes
397 | 0.5902 / 0.4417 / -3.83 / 0.0314 / yes | 0.5333 / 0.3686 / -3.14 / 0.0516 / no
398 | 0.5500 / 0.3981 / -5.92 / 0.0096 / yes | 0.5000 / 0.3390 / -7.76 / 0.0045 / yes
399 | 0.6557 / 0.4292 / -4.40 / 0.0218 / yes | 0.4746 / 0.3291 / -5.20 / 0.0139 / yes
400 | 0.7705 / 0.3750 / -27.40 / 0.0001 / yes | 0.4237 / 0.3207 / -4.95 / 0.0158 / yes
401 | 0.6721 / 0.4417 / -5.46 / 0.0121 / yes | 0.4746 / 0.4136 / -3.39 / 0.0427 / yes
402 | 0.5738 / 0.3917 / -7.57 / 0.0048 / yes | 0.4746 / 0.3249 / -6.20 / 0.0084 / yes
403 | 0.5667 / 0.4478 / -3.96 / 0.0288 / yes | 0.5254 / 0.4008 / -5.01 / 0.0153 / yes
404 | 0.5738 / 0.4333 / -3.31 / 0.0456 / yes | 0.4746 / 0.3924 / -11.15 / 0.0015 / yes
405 | 0.6230 / 0.2750 / -20.06 / 0.0003 / yes | 0.4068 / 0.2871 / -3.41 / 0.0422 / yes
406 | 0.7000 / 0.3820 / -10.45 / 0.0019 / yes | 0.4237 / 0.3317 / -3.52 / 0.0719 / no
407 | 0.5833 / 0.3566 / -5.10 / 0.0146 / yes | 0.4333 / 0.3305 / -2.31 / 0.1043 / no
408 | 0.8033 / 0.3792 / -15.40 / 0.0006 / yes | 0.4667 / 0.3602 / -2.45 / 0.0915 / no
409 | 0.5246 / 0.3750 / -4.05 / 0.0271 / yes | 0.4068 / 0.3483 / -11.78 / 0.0071 / yes
410 | 0.5000 / 0.4440 / -3.56 / 0.0377 / yes | 0.4237 / 0.3881 / -2.87 / 0.0642 / no

[The stream then continues with record 411, opening with the same stderr warning:]

+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n",
+      "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7049180327868853\n", + "Average of Other Ratios: 0.4666666666666667\n", + "T-Statistic: -6.095443431544011\n", + "P-Value: 0.008869465900928337\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.4092514124293785\n", + "T-Statistic: -2.568138862870189\n", + "P-Value: 0.08262673431862806\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 411\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 412\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.4312841530054645\n", + "T-Statistic: -3.2330608226780675\n", + "P-Value: 0.0481037799750477\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.3839689265536724\n", + "T-Statistic: -2.903867111427604\n", + "P-Value: 0.06230610742939453\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 412\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 413\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5409836065573771\n", + "Average of Other Ratios: 0.4416666666666667\n", + "T-Statistic: -2.8905475311707214\n", + "P-Value: 0.06298491773711792\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.3940677966101695\n", + "T-Statistic: -6.80809631531433\n", + "P-Value: 0.006481122703408014\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 413\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 414\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.4110655737704918\n", + "T-Statistic: -5.966922212523975\n", + "P-Value: 0.009418071176895705\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3542372881355932\n", + "T-Statistic: -2.2861298780156964\n", + "P-Value: 0.10633394781548948\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 414\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 415\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.75\n", + "Average of Other Ratios: 0.36495901639344264\n", + "T-Statistic: -15.214865853342191\n", + "P-Value: 0.0006165289089404656\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.38425141242937855\n", + "T-Statistic: -2.687063737163629\n", + "P-Value: 0.07459984908288483\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 415\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 416\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6721311475409836\n", + "Average of Other Ratios: 0.3916666666666667\n", + "T-Statistic: -33.655737704917975\n", + "P-Value: 5.766533760280624e-05\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3536723163841808\n", + "T-Statistic: -3.172729727206767\n", + "P-Value: 0.08662826593910922\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 416\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 417\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5666666666666667\n", + "Average of Other Ratios: 0.4230874316939891\n", + "T-Statistic: -3.063975004864087\n", + "P-Value: 0.054821341360087615\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.35593220338983045\n", + "T-Statistic: -4.843649660017283\n", + "P-Value: 0.016788991301625738\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 417\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 418\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5409836065573771\n", + "Average of Other Ratios: 0.42500000000000004\n", + "T-Statistic: -7.268455569166599\n", + "P-Value: 0.005374210400514755\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3670197740112994\n", + "T-Statistic: -2.9579994448141673\n", + "P-Value: 0.05963975694231845\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 418\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 419\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.37083333333333335\n", + "T-Statistic: -7.4933789577565\n", + "P-Value: 0.004923473774846551\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.30903954802259886\n", + "T-Statistic: -2.5908795387845776\n", + "P-Value: 0.12224781736312054\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 419\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 420\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5737704918032787\n", + "Average of Other Ratios: 0.48750000000000004\n", + "T-Statistic: -4.140983606557374\n", + "P-Value: 0.025571051486798255\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.576271186440678\n", + "Average of Other Ratios: 0.4008474576271186\n", + "T-Statistic: -8.297720726530276\n", + "P-Value: 0.0036672497469535698\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 420\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 421\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6229508196721312\n", + "Average of Other Ratios: 0.45416666666666666\n", + "T-Statistic: -5.024423429015315\n", + "P-Value: 0.015188120980305192\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.38182674199623357\n", + "T-Statistic: -1.9595943025088522\n", + "P-Value: 0.1891147904640091\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 421\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 422\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6666666666666666\n", + "Average of Other Ratios: 0.34426229508196726\n", + "T-Statistic: -10.200056186904932\n", + "P-Value: 0.0020083355064274685\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.32076271186440675\n", + "T-Statistic: -4.322021374340152\n", + "P-Value: 0.022827550876124904\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 422\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 423\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.75\n", + "Average of Other Ratios: 0.4724043715846995\n", + "T-Statistic: -4.432281354966694\n", + "P-Value: 0.021341416432768157\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.4388418079096046\n", + "T-Statistic: -2.947245022552593\n", + "P-Value: 0.0601579316359931\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 423\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 424\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", +     "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" +    ] +   },
+   [Notebook output for documents 424-444, condensed. Each document produces two result blocks, and bert-base-uncased is reloaded as BertForMaskedLM several times per document, so the same Hugging Face warning about its unused pooler and seq_relationship weights repeats before every block; the repeats are omitted here. Each run's stdout reports a Highest Match Ratio, an Average of Other Ratios, a T-Statistic, a P-Value, and a verdict on whether the highest ratio differs significantly from the others. Across documents 424-444 the highest match ratios range from roughly 0.36 to 0.84, the averages of the other ratios from roughly 0.31 to 0.49, and every run reports p < 0.05 except one run of document 431 (p = 0.111) and one run of document 443 (p = 0.086), which are reported as not significantly different. The log then prints the "Done 444" / "Doing 445" banner and begins reloading the model for document 445.]
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.43968579234972677\n", + "T-Statistic: -2.8833841562530913\n", + "P-Value: 0.06335381086618001\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.39223163841807906\n", + "T-Statistic: -3.5593682026592988\n", + "P-Value: 0.03783717712998915\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 445\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 446\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.4685109289617486\n", + "T-Statistic: -3.74670122389729\n", + "P-Value: 0.03319137901399563\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.559322033898305\n", + "Average of Other Ratios: 0.41744350282485876\n", + "T-Statistic: -3.7183243247458044\n", + "P-Value: 0.03384627426695061\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 446\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 447\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.3916666666666667\n", + "T-Statistic: -4.201654069631227\n", + "P-Value: 0.02460640768982449\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.35593220338983056\n", + "T-Statistic: -4.164132562731401\n", + "P-Value: 0.025197328469594847\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 447\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 448\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.4519808743169399\n", + "T-Statistic: -3.088509743881098\n", + "P-Value: 0.05377632746313144\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.559322033898305\n", + "Average of Other Ratios: 0.37584745762711863\n", + "T-Statistic: -5.567318968997769\n", + "P-Value: 0.01143555025754969\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 448\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 449\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.464412568306011\n", + "T-Statistic: -3.9929846359991004\n", + "P-Value: 0.0281374053103564\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.43036723163841806\n", + "T-Statistic: -3.6908520053100666\n", + "P-Value: 0.03449613636026554\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 449\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 450\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.8688524590163934\n", + "Average of Other Ratios: 0.4041666666666667\n", + "T-Statistic: -111.52459016393443\n", + "P-Value: 1.5893997832869387e-06\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.4135593220338983\n", + "T-Statistic: -6.287241983947008\n", + "P-Value: 0.008126201507413536\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 450\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 451\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6557377049180327\n", + "Average of Other Ratios: 0.3625\n", + "T-Statistic: -10.053864168618263\n", + "P-Value: 0.002095156783314077\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.354590395480226\n", + "T-Statistic: -4.195847013586463\n", + "P-Value: 0.024696683385781026\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 451\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 452\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.639344262295082\n", + "Average of Other Ratios: 0.46249999999999997\n", + "T-Statistic: -4.30939544513269\n", + "P-Value: 0.023006171549745307\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.559322033898305\n", + "Average of Other Ratios: 0.3545197740112994\n", + "T-Statistic: -7.100680549678681\n", + "P-Value: 0.0057464492134181155\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 452\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 453\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6333333333333333\n", + "Average of Other Ratios: 0.41878415300546445\n", + "T-Statistic: -3.907188916118307\n", + "P-Value: 0.029777989336963344\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3925141242937853\n", + "T-Statistic: -2.652993667727985\n", + "P-Value: 0.07679697042443225\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 453\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 454\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.4311475409836066\n", + "T-Statistic: -2.3185802321951905\n", + "P-Value: 0.10321797628133385\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.559322033898305\n", + "Average of Other Ratios: 0.32492937853107345\n", + "T-Statistic: -10.457966297708378\n", + "P-Value: 0.0018664458731184775\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 454\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 455\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.4395491803278689\n", + "T-Statistic: -3.7611226340766057\n", + "P-Value: 0.03286477251791561\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.35868644067796607\n", + "T-Statistic: -13.182726027622682\n", + "P-Value: 0.000943041742019242\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 455\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 456\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5573770491803278\n", + "Average of Other Ratios: 0.42500000000000004\n", + "T-Statistic: -7.6310252969080015\n", + "P-Value: 0.004672019040052037\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.43333333333333335\n", + "Average of Other Ratios: 0.3855932203389831\n", + "T-Statistic: -4.476919366879844\n", + "P-Value: 0.0207754384968935\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 456\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 457\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.48333333333333334\n", + "Average of Other Ratios: 0.3772540983606557\n", + "T-Statistic: -2.892862105157335\n", + "P-Value: 0.06286629800985993\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3559322033898305\n", + "Average of Other Ratios: 0.3038135593220339\n", + "T-Statistic: -2.8400584700013614\n", + "P-Value: 0.06564341941663877\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 457\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 458\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
+        "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
+       ]
+      },
[Notebook output condensed. For each document the cell prints two result blocks (one per run), separated by "Doing N" / "Done N" banner lines, and before every run Hugging Face re-emits the identical loader warning "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']" together with its "This IS expected ... / This IS NOT expected ..." explanation. The duplicated warnings and banner rules are omitted here; the recorded statistics for documents 458-478 are tabulated below, rounded to four decimals. "Significant" means p < 0.05, i.e. the notebook printed "The highest ratio is significantly different from the others."]

Doc  Run  Highest Match Ratio  Average of Other Ratios  T-Statistic  P-Value  Significant
458   1   0.5500               0.3485                   -4.0799      0.0266   yes
458   2   0.4068               0.3039                   -4.6304      0.0190   yes
459   1   0.5667               0.3569                   -3.0127      0.0571   no
459   2   0.4237               0.2995                   -3.9160      0.0296   yes
460   1   0.5833               0.4520                   -3.5857      0.0371   yes
460   2   0.4915               0.4095                   -3.3437      0.0443   yes
461   1   0.6000               0.3983                   -6.2285      0.0083   yes
461   2   0.4000               0.3517                   -2.7649      0.0699   no
462   1   0.5833               0.3650                   -11.4670     0.0014   yes
462   2   0.4746               0.3332                   -3.7556      0.0330   yes
463   1   0.6885               0.3708                   -18.4923     0.0003   yes
463   2   0.4576               0.3418                   -5.4306      0.0123   yes
464   1   0.6393               0.3583                   -7.0314      0.0059   yes
464   2   0.4068               0.3421                   -2.3719      0.0983   no
465   1   0.6667               0.4273                   -7.0500      0.0059   yes
465   2   0.5254               0.3715                   -9.3125      0.0026   yes
466   1   0.5333               0.3648                   -3.2442      0.0477   yes
466   2   0.4068               0.3089                   -4.9044      0.0391   yes
467   1   0.6230               0.4042                   -4.8823      0.0164   yes
467   2   0.5085               0.4012                   -3.3116      0.0453   yes
468   1   0.4833               0.4029                   -2.2739      0.1075   no
468   2   0.4237               0.3461                   -5.0099      0.0153   yes
469   1   0.7167               0.4318                   -4.9981      0.0154   yes
469   2   0.4746               0.3670                   -9.0487      0.0029   yes
470   1   0.6833               0.4404                   -2.6121      0.0795   no
470   2   0.4407               0.3414                   -2.3606      0.0993   no
471   1   0.5667               0.4852                   -1.9297      0.1492   no
471   2   0.4746               0.3802                   -2.8399      0.0657   no
472   1   0.5333               0.4275                   -3.5561      0.0379   yes
472   2   0.4576               0.3372                   -3.5534      0.0380   yes
473   1   0.6393               0.4625                   -4.0406      0.0273   yes
473   2   0.5000               0.4449                   -2.2630      0.1086   no
474   1   0.6393               0.4750                   -6.8317      0.0064   yes
474   2   0.4576               0.4178                   -4.3701      0.0222   yes
475   1   0.7049               0.3583                   -24.0121     0.0002   yes
475   2   0.4237               0.3203                   -9.1615      0.0117   yes
476   1   0.7705               0.4042                   -8.5799      0.0033   yes
476   2   0.5254               0.3842                   -3.7062      0.0341   yes
477   1   0.5667               0.3779                   -5.1019      0.0146   yes
477   2   0.3898               0.3248                   -4.8167      0.0170   yes
478   1   0.7167               0.3857                   -7.6645      0.0046   yes
478   2   0.4746               0.3544                   -3.7843      0.0323   yes
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7166666666666667\n", + "Average of Other Ratios: 0.385724043715847\n", + "T-Statistic: -7.664492373372008\n", + "P-Value: 0.004613454840878532\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.35444915254237286\n", + "T-Statistic: -3.7842912965837407\n", + "P-Value: 0.032348636596183485\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 478\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 479\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.48333333333333334\n", + "Average of Other Ratios: 0.4031876138433515\n", + "T-Statistic: -3.0047783082409976\n", + "P-Value: 0.09520452944787951\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.32492937853107345\n", + "T-Statistic: -3.4140132990801026\n", + "P-Value: 0.04202684058034692\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 479\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 480\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.4375\n", + "T-Statistic: -5.252662296910844\n", + "P-Value: 0.013439429896053543\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5423728813559322\n", + "Average of Other Ratios: 0.3884180790960452\n", + "T-Statistic: -7.8459916831607694\n", + "P-Value: 0.004312165413432141\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 480\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 481\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.43333333333333335\n", + "Average of Other Ratios: 0.35280054644808745\n", + "T-Statistic: -2.803386864259781\n", + "P-Value: 0.06766254223000473\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.2825564971751412\n", + "T-Statistic: -8.059631433861888\n", + "P-Value: 0.003989928963853392\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 481\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 482\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7868852459016393\n", + "Average of Other Ratios: 0.38333333333333336\n", + "T-Statistic: -10.324506821548738\n", + "P-Value: 0.0019381469069513462\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.43333333333333335\n", + "Average of Other Ratios: 0.3432203389830508\n", + "T-Statistic: -4.253333333333336\n", + "P-Value: 0.023821477797343008\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 482\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 483\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.42288251366120216\n", + "T-Statistic: -3.622802747830676\n", + "P-Value: 0.036175989328214676\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4\n", + "Average of Other Ratios: 0.3728813559322034\n", + "T-Statistic: -3.9191835884530852\n", + "P-Value: 0.029541363360540956\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 483\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 484\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.65\n", + "Average of Other Ratios: 0.3319672131147541\n", + "T-Statistic: -17.615818436997262\n", + "P-Value: 0.0003987923532231844\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.3247175141242938\n", + "T-Statistic: -3.2435043974379334\n", + "P-Value: 0.04772389993910521\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 484\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 485\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6833333333333333\n", + "Average of Other Ratios: 0.4146174863387978\n", + "T-Statistic: -6.525168128577647\n", + "P-Value: 0.007313787597179912\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.385593220338983\n", + "T-Statistic: -3.2052128901777346\n", + "P-Value: 0.04913559685870657\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 485\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 486\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.3689207650273224\n", + "T-Statistic: -4.354954591021095\n", + "P-Value: 0.022370006686486543\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.3458333333333333\n", + "T-Statistic: -6.3744164409558755\n", + "P-Value: 0.007815387560659307\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 486\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 487\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5737704918032787\n", + "Average of Other Ratios: 0.3625\n", + "T-Statistic: -4.5595686353563485\n", + "P-Value: 0.01977785439608666\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.559322033898305\n", + "Average of Other Ratios: 0.3120056497175141\n", + "T-Statistic: -10.204926179291313\n", + "P-Value: 0.0020055260919182947\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 487\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 488\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.65\n", + "Average of Other Ratios: 0.41475409836065574\n", + "T-Statistic: -8.217472162729653\n", + "P-Value: 0.00377202241458133\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5423728813559322\n", + "Average of Other Ratios: 0.3757062146892655\n", + "T-Statistic: -2.741962532807021\n", + "P-Value: 0.07122087552292544\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 488\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 489\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6333333333333333\n", + "Average of Other Ratios: 0.4316256830601093\n", + "T-Statistic: -6.118804569189945\n", + "P-Value: 0.008774285877481431\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5423728813559322\n", + "Average of Other Ratios: 0.4221045197740113\n", + "T-Statistic: -5.546054501123849\n", + "P-Value: 0.011558137961560508\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 489\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 490\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5573770491803278\n", + "Average of Other Ratios: 0.3958333333333333\n", + "T-Statistic: -5.135277089276766\n", + "P-Value: 0.014303932115525365\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.37951977401129944\n", + "T-Statistic: -4.250201094643769\n", + "P-Value: 0.02386812308620629\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 490\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 491\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5666666666666667\n", + "Average of Other Ratios: 0.4191256830601093\n", + "T-Statistic: -3.801261952080425\n", + "P-Value: 0.03197715417981016\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.3728813559322034\n", + "T-Statistic: -4.984275273297009\n", + "P-Value: 0.015525883080703483\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 491\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 492\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
+        "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
+       ]
+      },

[Notebook output for documents 492–512 of the detection loop. Each document is tested twice: every run reloads bert-base-uncased into BertForMaskedLM (which emits the standard transformers notice that the unused 'bert.pooler.*' and 'cls.seq_relationship.*' weights were skipped, expected for a masked-LM head) and then prints the highest match ratio, the average of the other ratios, and a t-test of the highest ratio against the others at the 0.05 significance level. The printed values:]

| Document | Run | Highest Match Ratio | Average of Other Ratios | T-Statistic | P-Value | Significant (p < 0.05) |
|---|---|---|---|---|---|---|
| 492 | 1 | 0.6065573770491803 | 0.3666666666666667 | -6.232543479694566 | 0.008329553737779013 | yes |
| 492 | 2 | 0.5084745762711864 | 0.3332627118644068 | -7.640503978788062 | 0.004655333124225627 | yes |
| 493 | 1 | 0.5901639344262295 | 0.4083333333333333 | -8.2470608782833 | 0.0037329364018606546 | yes |
| 493 | 2 | 0.4745762711864407 | 0.37570621468926557 | -6.286185570937117 | 0.008130066802486461 | yes |
| 494 | 1 | 0.5666666666666667 | 0.41892076502732245 | -5.9784922810858205 | 0.009366890251308508 | yes |
| 494 | 2 | 0.3898305084745763 | 0.3500706214689266 | -2.435222247729202 | 0.09289556214456558 | no |
| 495 | 1 | 0.5666666666666667 | 0.4479508196721312 | -2.7046377161768436 | 0.0734969637824313 | no |
| 495 | 2 | 0.4576271186440678 | 0.4049435028248588 | -5.1288742164266266 | 0.0143531474427786 | yes |
| 496 | 1 | 0.6833333333333333 | 0.38169398907103824 | -8.926429305995807 | 0.002965896425199081 | yes |
| 496 | 2 | 0.4666666666666667 | 0.3389830508474576 | -18.452822728966602 | 0.00034730393994395734 | yes |
| 497 | 1 | 0.6557377049180327 | 0.4208333333333333 | -6.853539958645125 | 0.00635920568434066 | yes |
| 497 | 2 | 0.4406779661016949 | 0.39505649717514124 | -3.8915662650602405 | 0.16012516703063404 | no |
| 498 | 1 | 0.5833333333333334 | 0.4351775956284153 | -3.62042968770528 | 0.03623643715847006 | yes |
| 498 | 2 | 0.4915254237288136 | 0.38799435028248586 | -4.361164169302447 | 0.022285069422793412 | yes |
| 499 | 1 | 0.7704918032786885 | 0.3666666666666667 | -13.615808159190916 | 0.0008569802174432299 | yes |
| 499 | 2 | 0.4745762711864407 | 0.37973163841807916 | -2.7216754734382707 | 0.07244696363099003 | no |
| 500 | 1 | 0.6229508196721312 | 0.3833333333333333 | -13.310561106566025 | 0.0009164879399678906 | yes |
| 500 | 2 | 0.4067796610169492 | 0.3500706214689266 | -2.6495878836827105 | 0.07702097888753302 | no |
| 501 | 1 | 0.7049180327868853 | 0.26666666666666666 | -13.732158696046199 | 0.0008356507564616465 | yes |
| 501 | 2 | 0.3220338983050847 | 0.2699152542372881 | -4.064774047804071 | 0.02685304092817379 | yes |
| 502 | 1 | 0.5833333333333334 | 0.3980874316939891 | -8.506737943489776 | 0.003411830999011348 | yes |
| 502 | 2 | 0.4406779661016949 | 0.354590395480226 | -3.6417926698166023 | 0.035696879201596796 | yes |
| 503 | 1 | 0.6885245901639344 | 0.44166666666666665 | -5.438685446957326 | 0.01220408026603391 | yes |
| 503 | 2 | 0.4745762711864407 | 0.3714689265536723 | -6.236810901332353 | 0.008313449992551876 | yes |
| 504 | 1 | 0.48333333333333334 | 0.4064890710382514 | -2.559704324213269 | 0.0832355641629743 | no |
| 504 | 2 | 0.423728813559322 | 0.3586158192090395 | -4.619015880036581 | 0.019098463373393156 | yes |
| 505 | 1 | 0.7 | 0.4148224043715847 | -5.693637680357018 | 0.010741337381016519 | yes |
| 505 | 2 | 0.5084745762711864 | 0.39661016949152544 | -2.9340578815309537 | 0.06080104286808068 | no |
| 506 | 1 | 0.6166666666666667 | 0.4025273224043716 | -8.890442589650126 | 0.0030009962178549105 | yes |
| 506 | 2 | 0.4067796610169492 | 0.31228813559322033 | -6.309916959963368 | 0.008043815258589415 | yes |
| 507 | 1 | 0.6557377049180327 | 0.42500000000000004 | -6.851133053826303 | 0.006365586871051235 | yes |
| 507 | 2 | 0.4406779661016949 | 0.3627824858757063 | -2.7319309661026785 | 0.07182390621092707 | no |
| 508 | 1 | 0.6229508196721312 | 0.4791666666666667 | -5.22212477804604 | 0.013657614104771973 | yes |
| 508 | 2 | 0.5254237288135594 | 0.3716101694915254 | -2.9643107979224634 | 0.059338263715170134 | no |
| 509 | 1 | 0.6166666666666667 | 0.3894808743169399 | -4.915294791461667 | 0.01612940842830176 | yes |
| 509 | 2 | 0.4666666666666667 | 0.3601694915254237 | -3.409705862611856 | 0.04215978877786793 | yes |
| 510 | 1 | 0.8688524590163934 | 0.35 | -25.41847552756969 | 0.0001335388647953177 | yes |
| 510 | 2 | 0.4915254237288136 | 0.3630649717514124 | -5.7764663818628845 | 0.010315868020629015 | yes |
| 511 | 1 | 0.5666666666666667 | 0.37315573770491806 | -3.2713284175710178 | 0.04673018421334356 | yes |
| 511 | 2 | 0.4576271186440678 | 0.33742937853107347 | -6.023354344153489 | 0.009171855397206797 | yes |
| 512 | 1 | 0.6333333333333333 | 0.43606557377049177 | -5.107098757651382 | 0.014522185166915114 | yes |
| 512 | 2 | 0.5084745762711864 | 0.3258003766478343 | -48.499999999999964 | 0.00042485397562817395 | yes |

[The loop then proceeds to document 513, beginning with the same checkpoint warning:]

+        "___________________________________________________________________________________________________________________________\n",
+        "Doing 513\n",
+        "___________________________________________________________________________________________________________________________\n"
+       ]
+      },
+      {
+       "name": "stderr",
+       "output_type": "stream",
+       "text": [
+        "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n",
+        "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6333333333333333\n", + "Average of Other Ratios: 0.37315573770491806\n", + "T-Statistic: -8.428620891983224\n", + "P-Value: 0.003504451310362902\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.2867231638418079\n", + "T-Statistic: -4.840000000000001\n", + "P-Value: 0.016823522712679524\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 513\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 514\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.35273224043715845\n", + "T-Statistic: -29.617790180570413\n", + "P-Value: 8.45343765080844e-05\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3209745762711864\n", + "T-Statistic: -3.5295270186824976\n", + "P-Value: 0.038652315352543654\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 514\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 515\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6557377049180327\n", + "Average of Other Ratios: 0.44999999999999996\n", + "T-Statistic: -4.3195919820040585\n", + "P-Value: 0.02286178010818133\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.3587570621468927\n", + "T-Statistic: -2.6549539521063013\n", + "P-Value: 0.07666840199371955\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 515\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 516\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7377049180327869\n", + "Average of Other Ratios: 0.3291666666666667\n", + "T-Statistic: -15.836371260048626\n", + "P-Value: 0.000547399610373209\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.286864406779661\n", + "T-Statistic: -10.52321129742476\n", + "P-Value: 0.0018326710996097446\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 516\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 517\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5573770491803278\n", + "Average of Other Ratios: 0.4333333333333333\n", + "T-Statistic: -2.604375511062208\n", + "P-Value: 0.08007263680217473\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.3799435028248587\n", + "T-Statistic: -6.86666666666667\n", + "P-Value: 0.006324551982225255\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 517\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 518\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5409836065573771\n", + "Average of Other Ratios: 0.42500000000000004\n", + "T-Statistic: -4.639344262295081\n", + "P-Value: 0.01887310292166381\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.3755649717514124\n", + "T-Statistic: -3.418861432750501\n", + "P-Value: 0.04187784075273277\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 518\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 519\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6229508196721312\n", + "Average of Other Ratios: 0.3958333333333333\n", + "T-Statistic: -4.09708695591706\n", + "P-Value: 0.026299586172448756\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.29964689265536726\n", + "T-Statistic: -5.609714558498766\n", + "P-Value: 0.011196176065376182\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 519\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 520\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5409836065573771\n", + "Average of Other Ratios: 0.3958333333333333\n", + "T-Statistic: -3.537066558867117\n", + "P-Value: 0.03844426851884796\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3389830508474576\n", + "Average of Other Ratios: 0.32024482109227875\n", + "T-Statistic: -10.47368421052631\n", + "P-Value: 0.008993145222343412\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 520\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 521\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.4230874316939891\n", + "T-Statistic: -6.814259023242736\n", + "P-Value: 0.006464409944966311\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.3706214689265537\n", + "T-Statistic: -6.849973076409931\n", + "P-Value: 0.02065393933365802\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 521\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 522\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6721311475409836\n", + "Average of Other Ratios: 0.3416666666666666\n", + "T-Statistic: -30.71720234254342\n", + "P-Value: 7.580035597644182e-05\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4\n", + "Average of Other Ratios: 0.3601694915254237\n", + "T-Statistic: -3.1333333333333346\n", + "P-Value: 0.05193110106234139\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 522\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 523\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7213114754098361\n", + "Average of Other Ratios: 0.42916666666666664\n", + "T-Statistic: -9.51209634369107\n", + "P-Value: 0.002463921302178941\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.3940677966101695\n", + "T-Statistic: -5.245148133977578\n", + "P-Value: 0.013492692472278441\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 523\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 524\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.39772313296903467\n", + "T-Statistic: -11.857859091548148\n", + "P-Value: 0.00703694821787774\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.37175141242937854\n", + "T-Statistic: -2.224480146264155\n", + "P-Value: 0.11257283375371135\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 524\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 525\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7166666666666667\n", + "Average of Other Ratios: 0.36461748633879776\n", + "T-Statistic: -8.164949484411201\n", + "P-Value: 0.003842746397948636\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.33340395480225987\n", + "T-Statistic: -4.859171643105984\n", + "P-Value: 0.016643155194105735\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 525\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 526\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
Per-document watermark detection output, documents 526–546. Each detection pass reloads `bert-base-uncased` as a `BertForMaskedLM`, which emits the standard transformers notice that the checkpoint's pooler and `cls.seq_relationship` weights are unused; the pass then prints the highest match ratio, the average of the remaining ratios, and a t-test comparing the two.

| Doc | Pass | Highest match ratio | Avg. of other ratios | t-statistic | p-value | Significant (p < 0.05) |
|-----|------|---------------------|----------------------|-------------|---------|------------------------|
| 526 | 1 | 0.5410 | 0.5000 | -2.01 | 0.138 | no |
| 526 | 2 | 0.4500 | 0.4195 | -2.86 | 0.0645 | no |
| 527 | 1 | 0.6667 | 0.4025 | -24.40 | 1.5e-04 | yes |
| 527 | 2 | 0.4237 | 0.3333 | -6.28 | 0.0082 | yes |
| 528 | 1 | 0.6000 | 0.4021 | -3.49 | 0.0399 | yes |
| 528 | 2 | 0.4407 | 0.2914 | -5.95 | 0.0095 | yes |
| 529 | 1 | 0.5410 | 0.4542 | -2.18 | 0.118 | no |
| 529 | 2 | 0.4407 | 0.3883 | -4.10 | 0.0263 | yes |
| 530 | 1 | 0.5410 | 0.3917 | -4.85 | 0.0168 | yes |
| 530 | 2 | 0.5000 | 0.3390 | -2.47 | 0.0903 | no |
| 531 | 1 | 0.6333 | 0.4690 | -2.30 | 0.105 | no |
| 531 | 2 | 0.4746 | 0.3922 | -2.61 | 0.0794 | no |
| 532 | 1 | 0.7377 | 0.3125 | -40.55 | 3.3e-05 | yes |
| 532 | 2 | 0.3898 | 0.2865 | -10.42 | 0.0091 | yes |
| 533 | 1 | 0.6500 | 0.4433 | -3.29 | 0.0461 | yes |
| 533 | 2 | 0.5593 | 0.4049 | -3.56 | 0.0377 | yes |
| 534 | 1 | 0.5333 | 0.3820 | -2.22 | 0.113 | no |
| 534 | 2 | 0.4068 | 0.3081 | -2.71 | 0.0734 | no |
| 535 | 1 | 0.5246 | 0.3833 | -2.62 | 0.0793 | no |
| 535 | 2 | 0.4746 | 0.3376 | -3.30 | 0.0458 | yes |
| 536 | 1 | 0.7667 | 0.3897 | -13.07 | 9.7e-04 | yes |
| 536 | 2 | 0.4237 | 0.3037 | -3.64 | 0.0358 | yes |
| 537 | 1 | 0.6230 | 0.4500 | -2.94 | 0.0608 | no |
| 537 | 2 | 0.5254 | 0.3798 | -5.14 | 0.0142 | yes |
| 538 | 1 | 0.6557 | 0.3833 | -4.59 | 0.0194 | yes |
| 538 | 2 | 0.4068 | 0.3334 | -5.55 | 0.0115 | yes |
| 539 | 1 | 0.6000 | 0.3730 | -5.09 | 0.0146 | yes |
| 539 | 2 | 0.4576 | 0.3456 | -2.62 | 0.0790 | no |
| 540 | 1 | 0.7667 | 0.3445 | -16.79 | 4.6e-04 | yes |
| 540 | 2 | 0.4407 | 0.3460 | -22.47 | 1.9e-04 | yes |
| 541 | 1 | 0.5902 | 0.3333 | -5.39 | 0.0125 | yes |
| 541 | 2 | 0.3390 | 0.2871 | -4.00 | 0.0281 | yes |
| 542 | 1 | 0.6066 | 0.4167 | -12.48 | 0.0011 | yes |
| 542 | 2 | 0.4833 | 0.3856 | -3.39 | 0.0428 | yes |
| 543 | 1 | 0.7213 | 0.4500 | -15.07 | 6.3e-04 | yes |
| 543 | 2 | 0.4915 | 0.4495 | -3.31 | 0.0806 | no |
| 544 | 1 | 0.7000 | 0.3859 | -7.97 | 0.0041 | yes |
| 544 | 2 | 0.4407 | 0.3333 | -4.31 | 0.0230 | yes |
| 545 | 1 | 0.5410 | 0.3792 | -7.38 | 0.0051 | yes |
| 545 | 2 | 0.3898 | 0.3041 | -3.01 | 0.0572 | no |
| 546 | 1 | 0.5082 | 0.3667 | -3.42 | 0.0419 | yes |
| 546 | 2 | 0.4576 | 0.2953 | -7.40 | 0.0051 | yes |

Processing then continues with document 547.
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5901639344262295\n", + "Average of Other Ratios: 0.48750000000000004\n", + "T-Statistic: -2.6937228618928075\n", + "P-Value: 0.07417954696153008\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.3627824858757062\n", + "T-Statistic: -4.484419263456091\n", + "P-Value: 0.020682260794596535\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 547\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 548\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5245901639344263\n", + "Average of Other Ratios: 0.3375\n", + "T-Statistic: -8.980327868852457\n", + "P-Value: 0.0029143354515137163\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.3432203389830508\n", + "T-Statistic: -8.529366637570185\n", + "P-Value: 0.0033856068172450396\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 548\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 549\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6333333333333333\n", + "Average of Other Ratios: 0.3610655737704918\n", + "T-Statistic: -6.352580462402448\n", + "P-Value: 0.007891754845695532\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.3163135593220339\n", + "T-Statistic: -7.643205738383732\n", + "P-Value: 0.004650591457090468\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 549\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 550\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.3734972677595629\n", + "T-Statistic: -4.334361862984616\n", + "P-Value: 0.022654697680077447\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.2828389830508474\n", + "T-Statistic: -3.1258403580998007\n", + "P-Value: 0.052233941204207116\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 550\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 551\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.4599726775956284\n", + "T-Statistic: -2.514156166273152\n", + "P-Value: 0.08662037793756441\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.39406779661016955\n", + "T-Statistic: -2.982530505374287\n", + "P-Value: 0.058478563965002554\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 551\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 552\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.360724043715847\n", + "T-Statistic: -3.4306011747993215\n", + "P-Value: 0.041519810852172816\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3559322033898305\n", + "Average of Other Ratios: 0.30903954802259886\n", + "T-Statistic: -7.0399681163437675\n", + "P-Value: 0.019586257511572443\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 552\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 553\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.44405737704918036\n", + "T-Statistic: -3.2820666584674085\n", + "P-Value: 0.04635368887671906\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.38813559322033897\n", + "T-Statistic: -1.8603721116372136\n", + "P-Value: 0.15977096644438923\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 553\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 554\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6666666666666666\n", + "Average of Other Ratios: 0.4273224043715847\n", + "T-Statistic: -6.912582855578067\n", + "P-Value: 0.006205274596049662\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.358545197740113\n", + "T-Statistic: -4.751834676331621\n", + "P-Value: 0.017686518806967016\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 554\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 555\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.39849726775956285\n", + "T-Statistic: -7.109187347867667\n", + "P-Value: 0.005726771511083016\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.3206920903954802\n", + "T-Statistic: -3.269035331124903\n", + "P-Value: 0.046811083782206005\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 555\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 556\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6885245901639344\n", + "Average of Other Ratios: 0.3875\n", + "T-Statistic: -7.23666133386256\n", + "P-Value: 0.005442256485075891\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.3500706214689266\n", + "T-Statistic: -3.7595621188342796\n", + "P-Value: 0.032899914767886236\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 556\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 557\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.41523224043715845\n", + "T-Statistic: -2.490304414512709\n", + "P-Value: 0.08846028307415578\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.3628531073446328\n", + "T-Statistic: -5.912181117649445\n", + "P-Value: 0.009665258648047835\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 557\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 558\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6833333333333333\n", + "Average of Other Ratios: 0.4265710382513661\n", + "T-Statistic: -3.6163076187603744\n", + "P-Value: 0.03634174375169587\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.38834745762711864\n", + "T-Statistic: -3.4214826476365072\n", + "P-Value: 0.04179756150788246\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 558\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 559\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6885245901639344\n", + "Average of Other Ratios: 0.35416666666666663\n", + "T-Statistic: -13.436668405820718\n", + "P-Value: 0.000891257804744702\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.35169491525423724\n", + "T-Statistic: -4.748264132284415\n", + "P-Value: 0.017722668444947104\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 559\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 560\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + },

[Notebook output for samples 560 to 580. For every sample the loop prints two result blocks with the statistics below; each block is preceded by the standard Hugging Face notice that the bert-base-uncased weights 'bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', and 'cls.seq_relationship.weight' are not used when initializing BertForMaskedLM, which is expected when loading a checkpoint trained on another task or architecture.]

Sample | Highest Match Ratio | Average of Other Ratios | T-Statistic | P-Value | Highest ratio significantly different?
560 | 0.55 | 0.3236338797814208 | -3.600554934819319 | 0.03674779775320059 | yes
560 | 0.3728813559322034 | 0.274364406779661 | -4.35035650844239 | 0.022433171283776563 | yes
561 | 0.4 | 0.2903688524590164 | -7.987447429719284 | 0.004095133323667272 | yes
561 | 0.2542372881355932 | 0.19851694915254237 | -3.3448722836047113 | 0.04422697438835633 | yes
562 | 0.6065573770491803 | 0.45833333333333337 | -5.720862788355775 | 0.010598994876723512 | yes
562 | 0.4915254237288136 | 0.41370056497175145 | -3.4764271828366216 | 0.04015895679106413 | yes
563 | 0.5573770491803278 | 0.3666666666666667 | -5.605717509713631 | 0.01121846256816809 | yes
563 | 0.423728813559322 | 0.341454802259887 | -2.2689902462968896 | 0.10802564058574593 | no
564 | 0.55 | 0.3857923497267759 | -2.773401460624423 | 0.06937128230425361 | no
564 | 0.4745762711864407 | 0.37153954802259886 | -3.9815118119797472 | 0.028349938061132722 | yes
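The notice summarized above is emitted when the bert-base-uncased checkpoint is loaded into a masked-language-model head via the transformers library. The following is a minimal sketch of a load that produces it, assuming the notebook uses BertForMaskedLM for mask prediction as the notice suggests; the `top_prediction` helper and the example sentence are illustrative, not taken from the notebook.

```python
# Minimal sketch: load bert-base-uncased for masked-LM scoring. Loading a
# checkpoint trained with the pre-training heads into BertForMaskedLM leaves the
# pooler and seq_relationship weights unused, which produces the notice seen
# throughout this log. The helper below is an assumption about how the model
# might be queried, not the notebook's exact code.
import torch
from transformers import BertTokenizer, BertForMaskedLM

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertForMaskedLM.from_pretrained("bert-base-uncased")  # emits the "weights not used" notice
model.eval()

def top_prediction(text_with_mask: str) -> str:
    """Return BERT's top candidate token(s) for the [MASK] position(s) in the input."""
    inputs = tokenizer(text_with_mask, return_tensors="pt")
    mask_positions = (inputs["input_ids"][0] == tokenizer.mask_token_id).nonzero(as_tuple=True)[0]
    with torch.no_grad():
        logits = model(**inputs).logits
    predicted_ids = logits[0, mask_positions].argmax(dim=-1)
    return tokenizer.decode(predicted_ids)

print(top_prediction("The watermark is embedded by replacing one [MASK] in the sentence."))
```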
Samples 565 to 572 follow the same pattern:

Sample | Highest Match Ratio | Average of Other Ratios | T-Statistic | P-Value | Highest ratio significantly different?
565 | 0.5901639344262295 | 0.44583333333333336 | -8.401275011504453 | 0.0035376591875335185 | yes
565 | 0.4067796610169492 | 0.3755649717514124 | -6.27344916163044 | 0.008176858545156238 | yes
566 | 0.6065573770491803 | 0.39583333333333337 | -5.442971730211302 | 0.012177401351288936 | yes
566 | 0.4406779661016949 | 0.3840395480225989 | -9.780487804878028 | 0.002271338003074173 | yes
567 | 0.6229508196721312 | 0.41250000000000003 | -4.643102959582609 | 0.018831812762783767 | yes
567 | 0.4406779661016949 | 0.35451977401129947 | -4.066666666666665 | 0.02682021272712732 | yes
568 | 0.5666666666666667 | 0.45614754098360655 | -3.6102043743828824 | 0.03649838253815035 | yes
568 | 0.423728813559322 | 0.4050141242937853 | -3.35873852777973 | 0.0437742186193112 | yes
569 | 0.65 | 0.4227459016393443 | -5.2448185798443125 | 0.013495034751188689 | yes
569 | 0.4915254237288136 | 0.3879943502824859 | -4.361164169302444 | 0.02228506942279346 | yes
570 | 0.6229508196721312 | 0.425 | -4.312990302477515 | 0.022955131152546198 | yes
570 | 0.4406779661016949 | 0.3421610169491526 | -3.2068965517241366 | 0.049072424069962796 | yes
571 | 0.6229508196721312 | 0.4541666666666667 | -6.130104668195612 | 0.008728726955771456 | yes
571 | 0.4915254237288136 | 0.405225988700565 | -3.4882407793268584 | 0.03981737779020827 | yes
572 | 0.6833333333333333 | 0.4685109289617486 | -6.044103218158989 | 0.009083452194363606 | yes
572 | 0.576271186440678 | 0.4471751412429379 | -6.645559382413252 | 0.006943215258999799 | yes
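Every result block above applies the same decision rule: the best-matching candidate's ratio is compared against the other candidates' ratios with a t-test, and the sample is flagged when the p-value falls below the usual 0.05 level. The sketch below shows one way such a check could be written, assuming a one-sample t-test (scipy.stats.ttest_1samp) of the other candidates' ratios against the highest one, which is consistent with the uniformly negative t-statistics in the log; the ratio values are made up for illustration and only the printed output format is taken from the notebook.

```python
# Minimal sketch of the significance check printed for every sample above:
# test whether the non-best candidates' match ratios differ from the best
# ratio, and flag the sample when p < 0.05. The ratios here are hypothetical.
from scipy import stats

ratios = [0.55, 0.35, 0.31, 0.33, 0.30]  # hypothetical match ratios, one per candidate key

best_index = max(range(len(ratios)), key=ratios.__getitem__)
highest = ratios[best_index]
others = ratios[:best_index] + ratios[best_index + 1:]

# One-sample t-test of the other ratios against the highest ratio.
t_stat, p_value = stats.ttest_1samp(others, popmean=highest)

print(f"Highest Match Ratio: {highest}")
print(f"Average of Other Ratios: {sum(others) / len(others)}")
print(f"T-Statistic: {t_stat}")
print(f"P-Value: {p_value}")
if p_value < 0.05:
    print("The highest ratio is significantly different from the others.")
else:
    print("The highest ratio is not significantly different from the others.")
```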
Samples 573 to 580 complete this portion of the run:

Sample | Highest Match Ratio | Average of Other Ratios | T-Statistic | P-Value | Highest ratio significantly different?
573 | 0.6666666666666666 | 0.551844262295082 | -16.28766880858878 | 0.0005035373682223364 | yes
573 | 0.576271186440678 | 0.4717514124293785 | -6.08276253029822 | 0.02597846598858569 | yes
574 | 0.6229508196721312 | 0.3416666666666667 | -4.923541270396146 | 0.016055668617949045 | yes
574 | 0.4576271186440678 | 0.2995762711864407 | -10.972170265475434 | 0.0016209009976600197 | yes
575 | 0.5 | 0.4148224043715847 | -2.935331303657975 | 0.06073856617509204 | no
575 | 0.3728813559322034 | 0.33145009416195853 | -2.8168113585237156 | 0.10631108992737348 | no
576 | 0.5666666666666667 | 0.39788251366120214 | -3.995817683425296 | 0.028085239330913513 | yes
576 | 0.4067796610169492 | 0.3713983050847458 | -3.303424358920434 | 0.045616214777893696 | yes
577 | 0.5245901639344263 | 0.42083333333333334 | -5.615165491162119 | 0.011165876393712151 | yes
577 | 0.5084745762711864 | 0.3542372881355932 | -9.208061462894172 | 0.0027091139233435414 | yes
578 | 0.6166666666666667 | 0.44760928961748636 | -2.8779672917258896 | 0.06363455777153253 | no
578 | 0.5084745762711864 | 0.35007062146892653 | -8.932852690514894 | 0.0029596885792663864 | yes
579 | 0.5737704918032787 | 0.4125 | -4.800754235493788 | 0.017200753129888367 | yes
579 | 0.4745762711864407 | 0.3626412429378531 | -3.365301828336874 | 0.04356196753364835 | yes
580 | 0.5901639344262295 | 0.33749999999999997 | -6.629461654119429 | 0.006991312298326812 | yes
580 | 0.3728813559322034 | 0.2785310734463277 | -5.283646176355578 | 0.013222684385745894 | yes

The loop then begins sample 581:

+ { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g.
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4918032786885246\n", + "Average of Other Ratios: 0.4041666666666667\n", + "T-Statistic: -10.98401471759805\n", + "P-Value: 0.0016157647819501492\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4166666666666667\n", + "Average of Other Ratios: 0.3516949152542373\n", + "T-Statistic: -4.489140335563251\n", + "P-Value: 0.02062388445422241\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 581\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 582\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.4559426229508197\n", + "T-Statistic: -2.024940270686622\n", + "P-Value: 0.13600606005905447\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.40098870056497177\n", + "T-Statistic: -7.988978716899146\n", + "P-Value: 0.004092863743178219\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 582\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 583\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.36878415300546447\n", + "T-Statistic: -2.8875369089248184\n", + "P-Value: 0.06313962819005703\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3728813559322034\n", + "Average of Other Ratios: 0.28255649717514125\n", + "T-Statistic: -6.786220279071329\n", + "P-Value: 0.0065409101032541635\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 583\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 584\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.3898224043715847\n", + "T-Statistic: -6.736784395106943\n", + "P-Value: 0.006678717888232159\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.33771186440677964\n", + "T-Statistic: -5.3858337188310035\n", + "P-Value: 0.012539411668401262\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 584\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 585\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6721311475409836\n", + "Average of Other Ratios: 0.4041666666666667\n", + "T-Statistic: -15.59782388553334\n", + "P-Value: 0.0005726503128458851\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.38757062146892657\n", + "T-Statistic: -2.4285714285714284\n", + "P-Value: 0.13584143478190688\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 585\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 586\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5409836065573771\n", + "Average of Other Ratios: 0.35416666666666663\n", + "T-Statistic: -10.11025517993969\n", + "P-Value: 0.002061088422236367\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.3417372881355932\n", + "T-Statistic: -6.45766024002191\n", + "P-Value: 0.007533010897429456\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 586\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 587\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5666666666666667\n", + "Average of Other Ratios: 0.43135245901639346\n", + "T-Statistic: -8.36023757475581\n", + "P-Value: 0.0035882787219289397\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.3964689265536723\n", + "T-Statistic: -4.172537109479583\n", + "P-Value: 0.02506338811524026\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 587\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 588\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.40683060109289615\n", + "T-Statistic: -3.685355043522012\n", + "P-Value: 0.034628080098915076\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.3169491525423729\n", + "T-Statistic: -1.9629629629629635\n", + "P-Value: 0.14443412236891323\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 588\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 589\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.639344262295082\n", + "Average of Other Ratios: 0.37083333333333335\n", + "T-Statistic: -8.342713328952133\n", + "P-Value: 0.0036101866302316646\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.364406779661017\n", + "T-Statistic: -6.301611145596256\n", + "P-Value: 0.008073865317362929\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 589\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 590\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6557377049180327\n", + "Average of Other Ratios: 0.37916666666666665\n", + "T-Statistic: -22.125683060109264\n", + "P-Value: 0.00020211389888430634\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.3463983050847458\n", + "T-Statistic: -2.043874646199097\n", + "P-Value: 0.13354795806792266\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 590\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 591\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5666666666666667\n", + "Average of Other Ratios: 0.40204918032786885\n", + "T-Statistic: -3.8720368025363827\n", + "P-Value: 0.030485627570044506\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3966101694915254\n", + "T-Statistic: -3.050041359322525\n", + "P-Value: 0.05542620511793901\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 591\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 592\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.41509562841530057\n", + "T-Statistic: -3.918936678185741\n", + "P-Value: 0.029546209898049555\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.3632062146892655\n", + "T-Statistic: -2.9222455629486523\n", + "P-Value: 0.06138443122939498\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 592\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 593\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.47540983606557374\n", + "Average of Other Ratios: 0.37083333333333335\n", + "T-Statistic: -2.2816691505216085\n", + "P-Value: 0.10677112400407492\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3459745762711864\n", + "T-Statistic: -3.610360849034176\n", + "P-Value: 0.03649435588128962\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 593\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 594\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
[Condensed notebook output for documents 594 to 614; values rounded. On every model load the run prints the standard Hugging Face notice: "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']", together with the explanation that this is expected when initializing from a checkpoint trained on another task or architecture. The notice is identical on every repetition and is quoted only once here. For each document the loop prints, between "Doing N" and "Done N" banners, two result blocks giving the Highest Match Ratio, the Average of Other Ratios, the T-Statistic and the P-Value, followed by a verdict at the p < 0.05 level.]

Doc 594: (1) highest 0.5667, others 0.4145, t -3.23, p 0.0484, significant; (2) highest 0.4068, others 0.3379, t -2.65, p 0.0768, not significant
Doc 595: (1) highest 0.6066, others 0.3833, t -5.39, p 0.0125, significant; (2) highest 0.4068, others 0.3460, t -3.39, p 0.0428, significant
Doc 596: (1) highest 0.6500, others 0.3523, t -6.07, p 0.0090, significant; (2) highest 0.3833, others 0.3305, t -2.62, p 0.0791, not significant
Doc 597: (1) highest 0.6557, others 0.4125, t -6.49, p 0.0074, significant; (2) highest 0.4407, others 0.3377, t -6.27, p 0.0082, significant
Doc 598: (1) highest 0.5000, others 0.4363, t -3.47, p 0.0740, not significant; (2) highest 0.4407, others 0.3839, t -8.49, p 0.0034, significant
Doc 599: (1) highest 0.8333, others 0.4443, t -13.07, p 0.0010, significant; (2) highest 0.5593, others 0.4096, t -4.53, p 0.0202, significant
Doc 600: (1) highest 0.5738, others 0.4125, t -3.64, p 0.0357, significant; (2) highest 0.4237, others 0.3882, t -5.00, p 0.0154, significant
Doc 601: (1) highest 0.4333, others 0.1946, t -7.08, p 0.0058, significant; (2) highest 0.2034, others 0.1225, t -3.61, p 0.0365, significant
Doc 602: (1) highest 0.6721, others 0.2667, t -13.67, p 0.0008, significant; (2) highest 0.3051, others 0.2403, t -3.02, p 0.0569, not significant
Doc 603: (1) highest 0.4426, others 0.3042, t -3.47, p 0.0403, significant; (2) highest 0.2712, others 0.2025, t -11.10, p 0.0016, significant
Doc 604: (1) highest 0.4000, others 0.2525, t -2.71, p 0.0732, not significant; (2) highest 0.2203, others 0.1476, t -2.90, p 0.0627, not significant
Doc 605: (1) highest 0.4918, others 0.2167, t -5.55, p 0.0115, significant; (2) highest 0.1695, others 0.1266, t -3.97, p 0.0285, significant
Doc 606: (1) highest 0.4500, others 0.2694, t -3.32, p 0.0449, significant; (2) highest 0.2203, others 0.1393, t -4.27, p 0.0236, significant
Doc 607: (1) highest 0.4918, others 0.2458, t -7.48, p 0.0050, significant; (2) highest 0.2203, others 0.1519, t -3.40, p 0.0768, not significant
Doc 608: (1) highest 0.5246, others 0.3542, t -3.56, p 0.0377, significant; (2) highest 0.3333, others 0.2585, t -5.17, p 0.0140, significant
Doc 609: (1) highest 0.3833, others 0.2656, t -4.80, p 0.0173, significant; (2) highest 0.2373, others 0.1771, t -5.84, p 0.0100, significant
Doc 610: (1) highest 0.5738, others 0.2583, t -8.13, p 0.0039, significant; (2) highest 0.2500, others 0.1695, t -3.88, p 0.0304, significant
Doc 611: (1) highest 0.3934, others 0.2167, t -9.82, p 0.0022, significant; (2) highest 0.2203, others 0.1306, t -4.37, p 0.0221, significant
Doc 612: (1) highest 0.4426, others 0.2667, t -2.77, p 0.0694, not significant; (2) highest 0.2712, others 0.1813, t -4.13, p 0.0258, significant
Doc 613: (1) highest 0.3500, others 0.2115, t -10.38, p 0.0019, significant; (2) highest 0.1695, others 0.1068, t -2.54, p 0.1264, not significant
Doc 614: (1) highest 0.6885, others 0.1917, t -22.54, p 0.0002, significant; (2) highest 0.2034, others 0.1518, t -3.08, p 0.0540, not significant

[The loop then prints "Doing 615" and continues in the same pattern.]
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6557377049180327\n", + "Average of Other Ratios: 0.20416666666666666\n", + "T-Statistic: -12.92280294143108\n", + "P-Value: 0.0010002691021999664\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.15254237288135594\n", + "Average of Other Ratios: 0.12648305084745762\n", + "T-Statistic: -2.492276285625687\n", + "P-Value: 0.0883063687998794\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 615\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 616\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6229508196721312\n", + "Average of Other Ratios: 0.25416666666666665\n", + "T-Statistic: -11.210456983480942\n", + "P-Value: 0.001521591601458421\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23728813559322035\n", + "Average of Other Ratios: 0.21101694915254238\n", + "T-Statistic: -4.841386618546792\n", + "P-Value: 0.016810392289180406\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 616\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 617\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5737704918032787\n", + "Average of Other Ratios: 0.21666666666666667\n", + "T-Statistic: -15.150632180505205\n", + "P-Value: 0.0006243223321137057\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2033898305084746\n", + "Average of Other Ratios: 0.13465160075329566\n", + "T-Statistic: -2.718662067942085\n", + "P-Value: 0.11285123312157036\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 617\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 618\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5245901639344263\n", + "Average of Other Ratios: 0.22499999999999998\n", + "T-Statistic: -6.918738471764206\n", + "P-Value: 0.006189509895466707\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.1864406779661017\n", + "Average of Other Ratios: 0.1602401129943503\n", + "T-Statistic: -2.531317735884927\n", + "P-Value: 0.08532554275225189\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 618\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 619\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.2528688524590164\n", + "T-Statistic: -8.617015849137902\n", + "P-Value: 0.0032865107935262\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.288135593220339\n", + "Average of Other Ratios: 0.19406779661016949\n", + "T-Statistic: -8.748025509254019\n", + "P-Value: 0.0031454221399862072\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 619\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 620\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4\n", + "Average of Other Ratios: 0.23620218579234975\n", + "T-Statistic: -5.140402367328491\n", + "P-Value: 0.014264695462063416\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23728813559322035\n", + "Average of Other Ratios: 0.15176553672316384\n", + "T-Statistic: -5.734186911097393\n", + "P-Value: 0.01053023168597254\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 620\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 621\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5737704918032787\n", + "Average of Other Ratios: 0.2791666666666667\n", + "T-Statistic: -9.365095173805795\n", + "P-Value: 0.0025786301461165144\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3389830508474576\n", + "Average of Other Ratios: 0.25296610169491524\n", + "T-Statistic: -2.7041039897995947\n", + "P-Value: 0.07353015997518966\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 621\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 622\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5081967213114754\n", + "Average of Other Ratios: 0.22916666666666663\n", + "T-Statistic: -8.306260504867486\n", + "P-Value: 0.003656327731735171\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2033898305084746\n", + "Average of Other Ratios: 0.15190677966101696\n", + "T-Statistic: -7.409133468514449\n", + "P-Value: 0.005086235812941152\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 622\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 623\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5409836065573771\n", + "Average of Other Ratios: 0.2375\n", + "T-Statistic: -7.720607509620097\n", + "P-Value: 0.004517414616084237\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23333333333333334\n", + "Average of Other Ratios: 0.15254237288135594\n", + "T-Statistic: -3.5204166420089162\n", + "P-Value: 0.038905627181690466\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 623\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 624\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7540983606557377\n", + "Average of Other Ratios: 0.15833333333333333\n", + "T-Statistic: -14.492905117377825\n", + "P-Value: 0.0007122141225715405\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23728813559322035\n", + "Average of Other Ratios: 0.1899717514124294\n", + "T-Statistic: -5.4166289591447665\n", + "P-Value: 0.012342580506632655\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 624\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 625\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4098360655737705\n", + "Average of Other Ratios: 0.23333333333333334\n", + "T-Statistic: -8.20310571032784\n", + "P-Value: 0.0037911953280657033\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.22033898305084745\n", + "Average of Other Ratios: 0.1435734463276836\n", + "T-Statistic: -5.128214329323895\n", + "P-Value: 0.01435823217533278\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 625\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 626\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.2568989071038251\n", + "T-Statistic: -6.954617925271148\n", + "P-Value: 0.006098663645228381\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2542372881355932\n", + "Average of Other Ratios: 0.16864406779661018\n", + "T-Statistic: -6.5059819266483885\n", + "P-Value: 0.00737522940630693\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 626\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 627\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6721311475409836\n", + "Average of Other Ratios: 0.22916666666666669\n", + "T-Statistic: -17.17085230252723\n", + "P-Value: 0.00043034591455487535\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23333333333333334\n", + "Average of Other Ratios: 0.20338983050847456\n", + "T-Statistic: -2.4984439601924695\n", + "P-Value: 0.08782706589330419\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 627\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 628\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
[Notebook cell output, documents 628–648 of the evaluation loop. Each document is processed in two runs; for each run the notebook prints the highest match ratio, the average of the other candidates' match ratios, the t-statistic and p-value of a t-test comparing the two, and a significance verdict (the verdicts are consistent with a 0.05 threshold). Every run is preceded by the standard Hugging Face Transformers notice that the `bert.pooler.*` and `cls.seq_relationship.*` weights of the `bert-base-uncased` checkpoint are not used when initializing `BertForMaskedLM`, which is expected for masked-language-model inference. The log then continues with document 649. The per-run statistics, with ratios and p-values rounded to four decimal places and t-statistics to two, are:]

| Document | Run | Highest match ratio | Avg. of other ratios | t-statistic | p-value | Significant (p < 0.05)? |
|---|---|---|---|---|---|---|
| 628 | 1 | 0.3667 | 0.2658 | -2.81 | 0.0673 | no |
| 628 | 2 | 0.2373 | 0.1856 | -8.28 | 0.0037 | yes |
| 629 | 1 | 0.6333 | 0.5185 | -7.58 | 0.0048 | yes |
| 629 | 2 | 0.5424 | 0.4811 | -2.69 | 0.0742 | no |
| 630 | 1 | 0.7500 | 0.1946 | -10.85 | 0.0017 | yes |
| 630 | 2 | 0.2542 | 0.1643 | -5.02 | 0.0152 | yes |
| 631 | 1 | 0.3500 | 0.2320 | -2.76 | 0.0702 | no |
| 631 | 2 | 0.1525 | 0.1123 | -2.71 | 0.1135 | no |
| 632 | 1 | 0.4833 | 0.2740 | -3.67 | 0.0349 | yes |
| 632 | 2 | 0.3051 | 0.1689 | -5.86 | 0.0099 | yes |
| 633 | 1 | 0.4000 | 0.2650 | -2.34 | 0.1013 | no |
| 633 | 2 | 0.2373 | 0.1560 | -3.13 | 0.0520 | no |
| 634 | 1 | 0.4754 | 0.2792 | -4.28 | 0.0234 | yes |
| 634 | 2 | 0.2542 | 0.1728 | -2.71 | 0.0729 | no |
| 635 | 1 | 0.4833 | 0.2449 | -16.40 | 0.0005 | yes |
| 635 | 2 | 0.2203 | 0.1688 | -4.29 | 0.0232 | yes |
| 636 | 1 | 0.4333 | 0.2531 | -4.51 | 0.0204 | yes |
| 636 | 2 | 0.2000 | 0.1737 | -6.20 | 0.0085 | yes |
| 637 | 1 | 0.6885 | 0.4250 | -7.25 | 0.0054 | yes |
| 637 | 2 | 0.5424 | 0.3711 | -5.42 | 0.0123 | yes |
| 638 | 1 | 0.5833 | 0.2614 | -6.30 | 0.0081 | yes |
| 638 | 2 | 0.2881 | 0.1857 | -4.93 | 0.0160 | yes |
| 639 | 1 | 0.4590 | 0.2625 | -5.00 | 0.0154 | yes |
| 639 | 2 | 0.2667 | 0.1949 | -6.56 | 0.0072 | yes |
| 640 | 1 | 0.5000 | 0.2284 | -14.22 | 0.0008 | yes |
| 640 | 2 | 0.1695 | 0.1266 | -4.95 | 0.0158 | yes |
| 641 | 1 | 0.6885 | 0.2417 | -17.25 | 0.0004 | yes |
| 641 | 2 | 0.3559 | 0.1814 | -7.92 | 0.0042 | yes |
| 642 | 1 | 0.4333 | 0.2321 | -5.71 | 0.0107 | yes |
| 642 | 2 | 0.2333 | 0.1398 | -4.20 | 0.0247 | yes |
| 643 | 1 | 0.3667 | 0.2445 | -3.30 | 0.0459 | yes |
| 643 | 2 | 0.2203 | 0.1730 | -4.39 | 0.0219 | yes |
| 644 | 1 | 0.5738 | 0.2667 | -11.65 | 0.0014 | yes |
| 644 | 2 | 0.2542 | 0.1772 | -9.34 | 0.0026 | yes |
| 645 | 1 | 0.3770 | 0.2458 | -4.08 | 0.0266 | yes |
| 645 | 2 | 0.2203 | 0.1348 | -8.71 | 0.0129 | yes |
| 646 | 1 | 0.4918 | 0.2333 | -4.86 | 0.0166 | yes |
| 646 | 2 | 0.2203 | 0.1813 | -2.86 | 0.0648 | no |
| 647 | 1 | 0.5902 | 0.2000 | -6.15 | 0.0087 | yes |
| 647 | 2 | 0.1695 | 0.1267 | -3.80 | 0.0321 | yes |
| 648 | 1 | 0.5410 | 0.2333 | -17.09 | 0.0004 | yes |
| 648 | 2 | 0.1833 | 0.1356 | -3.98 | 0.0283 | yes |
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4426229508196721\n", + "Average of Other Ratios: 0.2583333333333333\n", + "T-Statistic: -6.667849243203331\n", + "P-Value: 0.006877333446979579\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23728813559322035\n", + "Average of Other Ratios: 0.16913841807909605\n", + "T-Statistic: -2.2349882150268345\n", + "P-Value: 0.11147884166525937\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 649\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 650\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.47540983606557374\n", + "Average of Other Ratios: 0.23333333333333334\n", + "T-Statistic: -9.186157399702243\n", + "P-Value: 0.0027280043187776987\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2711864406779661\n", + "Average of Other Ratios: 0.19858757062146892\n", + "T-Statistic: -2.5535821728149255\n", + "P-Value: 0.08368093910992941\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 650\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 651\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5409836065573771\n", + "Average of Other Ratios: 0.4375\n", + "T-Statistic: -1.9737703484788938\n", + "P-Value: 0.14292112743640065\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.39653954802259883\n", + "T-Statistic: -2.821061725972151\n", + "P-Value: 0.06667985231722355\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 651\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 652\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.2614071038251366\n", + "T-Statistic: -5.835105865583961\n", + "P-Value: 0.010027921838910365\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2542372881355932\n", + "Average of Other Ratios: 0.22365819209039547\n", + "T-Statistic: -6.611650272701572\n", + "P-Value: 0.007045041561568846\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 652\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 653\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.18640710382513662\n", + "T-Statistic: -8.230490181795378\n", + "P-Value: 0.0037547595934809518\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.15254237288135594\n", + "Average of Other Ratios: 0.1290960451977401\n", + "T-Statistic: -1.6375555461118865\n", + "P-Value: 0.24316808629278763\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 653\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 654\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5737704918032787\n", + "Average of Other Ratios: 0.2583333333333333\n", + "T-Statistic: -15.901213347642667\n", + "P-Value: 0.0005407927864783102\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.22033898305084745\n", + "Average of Other Ratios: 0.19830508474576272\n", + "T-Statistic: -5.461092327709232\n", + "P-Value: 0.01206545301607257\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 654\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 655\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.2823087431693989\n", + "T-Statistic: -10.150428532833908\n", + "P-Value: 0.0020372645895073123\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3220338983050847\n", + "Average of Other Ratios: 0.16927966101694913\n", + "T-Statistic: -3.519195236968839\n", + "P-Value: 0.038939748753238246\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 655\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 656\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5901639344262295\n", + "Average of Other Ratios: 0.23750000000000002\n", + "T-Statistic: -6.178438384650274\n", + "P-Value: 0.008537325917522763\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23728813559322035\n", + "Average of Other Ratios: 0.1772598870056497\n", + "T-Statistic: -6.871842709362766\n", + "P-Value: 0.00631095591061526\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 656\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 657\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4426229508196721\n", + "Average of Other Ratios: 0.27083333333333337\n", + "T-Statistic: -3.3626410531598294\n", + "P-Value: 0.04364785672227368\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.288135593220339\n", + "Average of Other Ratios: 0.21094632768361582\n", + "T-Statistic: -3.6366923315065693\n", + "P-Value: 0.035824758686131074\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 657\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 658\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.3445355191256831\n", + "T-Statistic: -1.8802591489231106\n", + "P-Value: 0.15665493572520436\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3559322033898305\n", + "Average of Other Ratios: 0.2918079096045198\n", + "T-Statistic: -2.173368590328213\n", + "P-Value: 0.16182504545842755\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 658\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 659\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.47540983606557374\n", + "Average of Other Ratios: 0.25416666666666665\n", + "T-Statistic: -4.330653825520752\n", + "P-Value: 0.022706457712737285\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2833333333333333\n", + "Average of Other Ratios: 0.1610169491525424\n", + "T-Statistic: -3.7266706420173588\n", + "P-Value: 0.033651949508593376\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 659\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 660\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.36065573770491804\n", + "Average of Other Ratios: 0.30416666666666664\n", + "T-Statistic: -3.0571045827459344\n", + "P-Value: 0.05511854521199261\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3050847457627119\n", + "Average of Other Ratios: 0.18524011299435028\n", + "T-Statistic: -3.1980807339949875\n", + "P-Value: 0.04940434566953938\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 660\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 661\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.43333333333333335\n", + "Average of Other Ratios: 0.24023224043715846\n", + "T-Statistic: -3.433838233132664\n", + "P-Value: 0.0414217747379\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2711864406779661\n", + "Average of Other Ratios: 0.1602401129943503\n", + "T-Statistic: -4.406927169154064\n", + "P-Value: 0.02167179821796596\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 661\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 662\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + },

[Notebook output for documents 662-682, condensed. Each document is evaluated twice; before every evaluation the loader prints the same expected Hugging Face notice that the 'bert.pooler.*' and 'cls.seq_relationship.*' weights of bert-base-uncased are unused when initializing BertForMaskedLM, and each pair of evaluations is wrapped in "Doing N" / "Done N" separator lines. Only the reported statistics are kept below, rounded to four decimal places. Each entry reads "highest match ratio vs average of other ratios, t-statistic, p-value (decision)"; "significant" means p < 0.05.]

Doc 662: 0.3770 vs 0.2417, t = -2.80, p = 0.0679 (not significant); 0.2542 vs 0.1647, t = -8.00, p = 0.0041 (significant)
Doc 663: 0.4667 vs 0.2189, t = -2.85, p = 0.0649 (not significant); 0.2034 vs 0.1392, t = -8.23, p = 0.0038 (significant)
Doc 664: 0.5410 vs 0.4708, t = -2.93, p = 0.0610 (not significant); 0.5333 vs 0.4407, t = -2.33, p = 0.1021 (not significant)
Doc 665: 0.5246 vs 0.2417, t = -19.60, p = 0.0003 (significant); 0.2373 vs 0.1690, t = -3.07, p = 0.0547 (not significant)
Doc 666: 0.4500 vs 0.2733, t = -2.64, p = 0.0779 (not significant); 0.2167 vs 0.1568, t = -2.83, p = 0.0664 (not significant)
Doc 667: 0.4667 vs 0.2693, t = -5.26, p = 0.0134 (significant); 0.2203 vs 0.1774, t = -2.57, p = 0.0827 (not significant)
Doc 668: 0.6167 vs 0.2242, t = -12.08, p = 0.0012 (significant); 0.2712 vs 0.1940, t = -7.53, p = 0.0048 (significant)
Doc 669: 0.4833 vs 0.1947, t = -7.75, p = 0.0045 (significant); 0.2167 vs 0.1229, t = -4.99, p = 0.0155 (significant)
Doc 670: 0.7049 vs 0.4375, t = -4.56, p = 0.0198 (significant); 0.4237 vs 0.4008, t = -2.39, p = 0.0964 (not significant)
Doc 671: 0.8000 vs 0.3070, t = -32.25, p = 6.6e-05 (significant); 0.3390 vs 0.3037, t = -2.44, p = 0.0925 (not significant)
Doc 672: 0.4667 vs 0.2984, t = -4.03, p = 0.0275 (significant); 0.1864 vs 0.1406, t = -2.97, p = 0.0973 (not significant)
Doc 673: 0.6230 vs 0.2167, t = -24.38, p = 0.0002 (significant); 0.2542 vs 0.1900, t = -2.73, p = 0.0721 (not significant)
Doc 674: 0.5000 vs 0.2487, t = -9.95, p = 0.0022 (significant); 0.2203 vs 0.1732, t = -2.44, p = 0.0922 (not significant)
Doc 675: 0.4833 vs 0.2364, t = -7.42, p = 0.0051 (significant); 0.2542 vs 0.1607, t = -2.83, p = 0.0663 (not significant)
Doc 676: 0.5574 vs 0.2500, t = -9.04, p = 0.0029 (significant); 0.2542 vs 0.1689, t = -6.94, p = 0.0061 (significant)
Doc 677: 0.4833 vs 0.2572, t = -3.57, p = 0.0374 (significant); 0.2833 vs 0.1695, t = -8.23, p = 0.0038 (significant)
Doc 678: 0.6000 vs 0.2697, t = -18.10, p = 0.0004 (significant); 0.2542 vs 0.2151, t = -3.90, p = 0.0300 (significant)
Doc 679: 0.4500 vs 0.2732, t = -2.98, p = 0.0585 (not significant); 0.2667 vs 0.1568, t = -4.34, p = 0.0225 (significant)
Doc 680: 0.3443 vs 0.2042, t = -3.02, p = 0.0566 (not significant); 0.2000 vs 0.1483, t = -12.20, p = 0.0012 (significant)
Doc 681: 0.3667 vs 0.2529, t = -3.37, p = 0.0434 (significant); 0.2373 vs 0.1225, t = -8.87, p = 0.0030 (significant)
Doc 682: 0.7377 vs 0.2000, t = -17.24, p = 0.0004 (significant); 0.2167 vs 0.1525, t = -4.14, p = 0.0255 (significant)

[Processing of document 683 begins in the same pattern.]
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.38333333333333336\n", + "Average of Other Ratios: 0.2944672131147541\n", + "T-Statistic: -2.8051586461529827\n", + "P-Value: 0.06756322441457491\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2711864406779661\n", + "Average of Other Ratios: 0.1899011299435028\n", + "T-Statistic: -7.530600502193264\n", + "P-Value: 0.004853749660576098\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 683\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 684\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6557377049180327\n", + "Average of Other Ratios: 0.14583333333333334\n", + "T-Statistic: -17.025299295981913\n", + "P-Value: 0.00044138624174763324\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.13333333333333333\n", + "Average of Other Ratios: 0.1016949152542373\n", + "T-Statistic: -3.2331615074619027\n", + "P-Value: 0.048100099388329834\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 684\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 685\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4426229508196721\n", + "Average of Other Ratios: 0.275\n", + "T-Statistic: -2.9340384355396165\n", + "P-Value: 0.060801997551779355\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2711864406779661\n", + "Average of Other Ratios: 0.1899717514124294\n", + "T-Statistic: -3.1834100610600315\n", + "P-Value: 0.04996302047691425\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 685\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 686\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4098360655737705\n", + "Average of Other Ratios: 0.30833333333333335\n", + "T-Statistic: -5.11676115950563\n", + "P-Value: 0.014446859559659811\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23728813559322035\n", + "Average of Other Ratios: 0.18566384180790962\n", + "T-Statistic: -4.298438567165345\n", + "P-Value: 0.023162647133957697\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 686\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 687\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4166666666666667\n", + "Average of Other Ratios: 0.2201502732240437\n", + "T-Statistic: -3.2307527644980745\n", + "P-Value: 0.04818825006053326\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.18333333333333332\n", + "Average of Other Ratios: 0.15677966101694918\n", + "T-Statistic: -6.266666666666664\n", + "P-Value: 0.008201920869688295\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 687\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 688\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4262295081967213\n", + "Average of Other Ratios: 0.2791666666666667\n", + "T-Statistic: -10.333341919958322\n", + "P-Value: 0.0019332884778574828\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2711864406779661\n", + "Average of Other Ratios: 0.2027542372881356\n", + "T-Statistic: -3.621297736079414\n", + "P-Value: 0.0362143108175595\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 688\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 689\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.21577868852459017\n", + "T-Statistic: -9.111666695534357\n", + "P-Value: 0.0027935663080181916\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.1864406779661017\n", + "Average of Other Ratios: 0.16454802259887005\n", + "T-Statistic: -5.39640733462664\n", + "P-Value: 0.012471369780280921\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 689\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 690\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4262295081967213\n", + "Average of Other Ratios: 0.21250000000000002\n", + "T-Statistic: -8.929327630907842\n", + "P-Value: 0.002963093223014625\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.1864406779661017\n", + "Average of Other Ratios: 0.11807909604519774\n", + "T-Statistic: -10.57181908559856\n", + "P-Value: 0.0018080338565436176\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 690\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 691\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4918032786885246\n", + "Average of Other Ratios: 0.4375\n", + "T-Statistic: -2.2687169915537657\n", + "P-Value: 0.10805287324168507\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.3463276836158192\n", + "T-Statistic: -6.147522027439089\n", + "P-Value: 0.008659111206240855\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 691\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 692\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5737704918032787\n", + "Average of Other Ratios: 0.2916666666666667\n", + "T-Statistic: -6.359769644709016\n", + "P-Value: 0.007866504158921796\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3050847457627119\n", + "Average of Other Ratios: 0.2615819209039548\n", + "T-Statistic: -4.063904567563298\n", + "P-Value: 0.026868139751298294\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 692\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 693\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.39344262295081966\n", + "Average of Other Ratios: 0.26666666666666666\n", + "T-Statistic: -1.8012412407306406\n", + "P-Value: 0.1694691787567682\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.22033898305084745\n", + "Average of Other Ratios: 0.1644774011299435\n", + "T-Statistic: -5.495043416350755\n", + "P-Value: 0.011859301638986507\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 693\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 694\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6885245901639344\n", + "Average of Other Ratios: 0.49166666666666675\n", + "T-Statistic: -6.916091828453289\n", + "P-Value: 0.006196281549258644\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5423728813559322\n", + "Average of Other Ratios: 0.45593220338983054\n", + "T-Statistic: -4.34666107141259\n", + "P-Value: 0.02248410342384351\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 694\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 695\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.36666666666666664\n", + "Average of Other Ratios: 0.2776639344262295\n", + "T-Statistic: -1.7553554396997046\n", + "P-Value: 0.1774643858274755\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23728813559322035\n", + "Average of Other Ratios: 0.2068502824858757\n", + "T-Statistic: -3.4595325465113227\n", + "P-Value: 0.04065396019819478\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 695\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 696\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
+     "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
+    ]
+   },
+   {
+    "name": "stdout",
+    "output_type": "stream",
+    "text": [
+     "[Condensed detection log for samples 696-716: each sample yields two detection results, and each result reports the highest match ratio, the average of the other ratios, and a t-test (t-statistic, p-value) comparing them. Highest match ratios range from roughly 0.19 to 0.84. The highest ratio is significantly different from the others (p < 0.05) in 35 of the 42 results; the exceptions are sample 696 (result 1), 700 (result 2), 706 (result 1), 707 (result 2), 709 (result 2), 712 (result 2), and 716 (result 2).]\n",
+     "Done 716\n",
+     "Doing 717\n"
+    ]
+   },
+   {
+    "name": "stderr",
+    "output_type": "stream",
+    "text": [
+     "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n",
+     "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.2448087431693989\n", + "T-Statistic: -8.253674606606968\n", + "P-Value: 0.0037242731002333166\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2711864406779661\n", + "Average of Other Ratios: 0.2024717514124294\n", + "T-Statistic: -4.532253548113998\n", + "P-Value: 0.020100540272823354\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 717\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 718\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.35\n", + "Average of Other Ratios: 0.24057377049180328\n", + "T-Statistic: -5.639493439517238\n", + "P-Value: 0.011031947405375732\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23728813559322035\n", + "Average of Other Ratios: 0.14774011299435028\n", + "T-Statistic: -3.52461629327718\n", + "P-Value: 0.03878859478065401\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 718\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 719\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.21530054644808744\n", + "T-Statistic: -9.600064444773704\n", + "P-Value: 0.002398495294738282\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23333333333333334\n", + "Average of Other Ratios: 0.16101694915254236\n", + "T-Statistic: -6.609891577527327\n", + "P-Value: 0.007050376214881156\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 719\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 720\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.65\n", + "Average of Other Ratios: 0.26140710382513666\n", + "T-Statistic: -8.577234238877079\n", + "P-Value: 0.003331005620411765\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2711864406779661\n", + "Average of Other Ratios: 0.21525423728813559\n", + "T-Statistic: -6.513721780101715\n", + "P-Value: 0.007350361959320121\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 720\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 721\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7704918032786885\n", + "Average of Other Ratios: 0.43333333333333335\n", + "T-Statistic: -18.728885297038175\n", + "P-Value: 0.00033227343416865454\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.4175847457627119\n", + "T-Statistic: -8.4047123277365\n", + "P-Value: 0.0035334622322826825\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 721\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 722\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.43333333333333335\n", + "Average of Other Ratios: 0.27827868852459015\n", + "T-Statistic: -3.3172720549176096\n", + "P-Value: 0.04514599422781829\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3\n", + "Average of Other Ratios: 0.1652542372881356\n", + "T-Statistic: -6.045729350003519\n", + "P-Value: 0.009076571184230395\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 722\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 723\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5245901639344263\n", + "Average of Other Ratios: 0.2375\n", + "T-Statistic: -10.122383386195422\n", + "P-Value: 0.0020538573428606206\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23728813559322035\n", + "Average of Other Ratios: 0.16440677966101697\n", + "T-Statistic: -2.6381526558147996\n", + "P-Value: 0.07777904054371029\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 723\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 724\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45901639344262296\n", + "Average of Other Ratios: 0.3\n", + "T-Statistic: -3.5639741636483593\n", + "P-Value: 0.037713321483800306\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3\n", + "Average of Other Ratios: 0.19491525423728814\n", + "T-Statistic: -3.201666232864797\n", + "P-Value: 0.04926900790572485\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 724\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 725\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.36666666666666664\n", + "Average of Other Ratios: 0.24480874316939888\n", + "T-Statistic: -2.9309984722316638\n", + "P-Value: 0.06095147293868425\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.1864406779661017\n", + "Average of Other Ratios: 0.15176553672316384\n", + "T-Statistic: -3.080747359192189\n", + "P-Value: 0.05410421984381787\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 725\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 726\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4166666666666667\n", + "Average of Other Ratios: 0.31912568306010924\n", + "T-Statistic: -2.530970883389505\n", + "P-Value: 0.08535147636509026\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3220338983050847\n", + "Average of Other Ratios: 0.19823446327683616\n", + "T-Statistic: -7.231156579885582\n", + "P-Value: 0.005454152940167432\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 726\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 727\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.24453551912568305\n", + "T-Statistic: -6.751934642062348\n", + "P-Value: 0.006636082160327463\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.26666666666666666\n", + "Average of Other Ratios: 0.18220338983050846\n", + "T-Statistic: -4.8345434590575715\n", + "P-Value: 0.016875322539984233\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 727\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 728\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3333333333333333\n", + "Average of Other Ratios: 0.24890710382513662\n", + "T-Statistic: -2.3788710766712677\n", + "P-Value: 0.14042438082786876\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2542372881355932\n", + "Average of Other Ratios: 0.15621468926553672\n", + "T-Statistic: -6.014126755966144\n", + "P-Value: 0.009211533394928783\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 728\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 729\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5666666666666667\n", + "Average of Other Ratios: 0.4769125683060109\n", + "T-Statistic: -3.385054287067082\n", + "P-Value: 0.04293102665536962\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.4514124293785311\n", + "T-Statistic: -2.773457470011942\n", + "P-Value: 0.06936804083315011\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 729\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 730\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.24904371584699453\n", + "T-Statistic: -16.87653237880174\n", + "P-Value: 0.00045306323873643635\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2711864406779661\n", + "Average of Other Ratios: 0.1942090395480226\n", + "T-Statistic: -6.695816773490067\n", + "P-Value: 0.006795828785145915\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 730\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 731\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
Condensed results (per check: highest match ratio, average of other ratios, t-statistic, p-value, verdict):
Item 730: check 1 highest 0.6167, others 0.2490, t -16.88, p 0.00045, significant; check 2 highest 0.2712, others 0.1942, t -6.70, p 0.0068, significant
Item 731: check 1 highest 0.4426, others 0.2542, t -4.94, p 0.0159, significant; check 2 highest 0.2542, others 0.1982, t -4.58, p 0.0195, significant
Item 732: check 1 highest 0.3934, others 0.1792, t -7.78, p 0.0044, significant; check 2 highest 0.2000, others 0.1144, t -4.90, p 0.0163, significant
Item 733: check 1 highest 0.4667, others 0.2983, t -3.64, p 0.0357, significant; check 2 highest 0.3220, others 0.2150, t -2.93, p 0.0610, not significant
Item 734: check 1 highest 0.6066, others 0.1833, t -31.10, p 7.3e-05, significant; check 2 highest 0.2542, others 0.1773, t -4.33, p 0.0227, significant
Item 735: check 1 highest 0.5738, others 0.2000, t -9.42, p 0.0025, significant; check 2 highest 0.2034, others 0.1349, t -2.42, p 0.0938, not significant
Item 736: check 1 highest 0.5246, others 0.2250, t -5.48, p 0.0119, significant; check 2 highest 0.2373, others 0.1775, t -2.32, p 0.1033, not significant
Item 737: check 1 highest 0.6667, others 0.5806, t -3.39, p 0.0426, significant; check 2 highest 0.5932, others 0.5065, t -3.47, p 0.0402, significant
Item 738: check 1 highest 0.5738, others 0.2917, t -8.21, p 0.0038, significant; check 2 highest 0.3500, others 0.2542, t -5.23, p 0.0136, significant
Item 739: check 1 highest 0.3279, others 0.1958, t -5.12, p 0.0144, significant; check 2 highest 0.1525, others 0.1138, t -2.11, p 0.1253, not significant
Item 740: check 1 highest 0.5500, others 0.2321, t -12.12, p 0.0012, significant; check 2 highest 0.2034, others 0.1603, t -2.96, p 0.0596, not significant
Item 741: check 1 highest 0.7167, others 0.2116, t -23.09, p 0.00018, significant; check 2 highest 0.2542, others 0.1480, t -3.15, p 0.0511, not significant
Item 742: check 1 highest 0.4262, others 0.3000, t -2.97, p 0.0590, not significant; check 2 highest 0.2881, others 0.1813, t -7.81, p 0.0044, significant
Item 743: check 1 highest 0.4262, others 0.2333, t -2.83, p 0.0659, not significant; check 2 highest 0.2881, others 0.1266, t -9.20, p 0.0027, significant
Item 744: check 1 highest 0.5000, others 0.2941, t -5.15, p 0.0142, significant; check 2 highest 0.2542, others 0.1983, t -5.28, p 0.0132, significant
Item 745: check 1 highest 0.4262, others 0.2458, t -2.85, p 0.0650, not significant; check 2 highest 0.3559, others 0.1476, t -7.73, p 0.0045, significant
Item 746: check 1 highest 0.4167, others 0.2070, t -3.21, p 0.0489, significant; check 2 highest 0.1833, others 0.1441, t -3.59, p 0.0371, significant
Item 747: check 1 highest 0.4000, others 0.2361, t -3.51, p 0.0392, significant; check 2 highest 0.2203, others 0.1392, t -4.35, p 0.0225, significant
Item 748: check 1 highest 0.3833, others 0.3027, t -2.87, p 0.0641, not significant; check 2 highest 0.2333, others 0.2076, t -6.07, p 0.0090, significant
Item 749: check 1 highest 0.5902, others 0.2167, t -7.61, p 0.0047, significant; check 2 highest 0.2373, others 0.1686, t -3.43, p 0.0414, significant
Item 750: check 1 highest 0.4667, others 0.2573, t -5.08, p 0.0147, significant; check 2 highest 0.2167, others 0.1695, t -2.58, p 0.0820, not significant
Doing 751: the stderr for item 751 again opens with the same Transformers notice, "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight'] - This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.48333333333333334\n", + "Average of Other Ratios: 0.22862021857923498\n", + "T-Statistic: -3.065403117058221\n", + "P-Value: 0.05475981670342542\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.22033898305084745\n", + "Average of Other Ratios: 0.12217514124293785\n", + "T-Statistic: -6.391226809216671\n", + "P-Value: 0.0077572568493554745\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 751\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 752\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.43333333333333335\n", + "Average of Other Ratios: 0.26509562841530054\n", + "T-Statistic: -2.866308813315449\n", + "P-Value: 0.06424408813835332\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2711864406779661\n", + "Average of Other Ratios: 0.15204802259887007\n", + "T-Statistic: -4.146966401618811\n", + "P-Value: 0.025473782147724618\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 752\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 753\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.30273224043715846\n", + "T-Statistic: -5.035807306036759\n", + "P-Value: 0.015094095073797963\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23728813559322035\n", + "Average of Other Ratios: 0.1856638418079096\n", + "T-Statistic: -7.414159169988326\n", + "P-Value: 0.005076328764679312\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 753\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 754\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.65\n", + "Average of Other Ratios: 0.3404371584699453\n", + "T-Statistic: -6.119691979185821\n", + "P-Value: 0.008770696803910683\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3389830508474576\n", + "Average of Other Ratios: 0.2827683615819209\n", + "T-Statistic: -2.946612591077136\n", + "P-Value: 0.06018857902224991\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 754\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 755\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5409836065573771\n", + "Average of Other Ratios: 0.20416666666666666\n", + "T-Statistic: -7.431074551672135\n", + "P-Value: 0.005043170304209105\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23728813559322035\n", + "Average of Other Ratios: 0.16857344632768362\n", + "T-Statistic: -3.9354028393368354\n", + "P-Value: 0.02922522554345655\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 755\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 756\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5081967213114754\n", + "Average of Other Ratios: 0.25\n", + "T-Statistic: -6.605727113313177\n", + "P-Value: 0.007063029498989654\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.288135593220339\n", + "Average of Other Ratios: 0.1855225988700565\n", + "T-Statistic: -9.224885508045242\n", + "P-Value: 0.002694722054185784\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 756\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 757\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4166666666666667\n", + "Average of Other Ratios: 0.2698087431693989\n", + "T-Statistic: -9.036379043539897\n", + "P-Value: 0.0028619661292264213\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2542372881355932\n", + "Average of Other Ratios: 0.18149717514124294\n", + "T-Statistic: -5.610767520239283\n", + "P-Value: 0.011190314670533465\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 757\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 758\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5081967213114754\n", + "Average of Other Ratios: 0.2791666666666667\n", + "T-Statistic: -7.116030982441239\n", + "P-Value: 0.005711005406184151\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3050847457627119\n", + "Average of Other Ratios: 0.21532485875706214\n", + "T-Statistic: -4.672257354206355\n", + "P-Value: 0.018515494454469265\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 758\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 759\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.28620218579234974\n", + "T-Statistic: -3.720511834007275\n", + "P-Value: 0.033795204522967935\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2711864406779661\n", + "Average of Other Ratios: 0.19413841807909604\n", + "T-Statistic: -14.324297025025746\n", + "P-Value: 0.000737365952799921\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 759\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 760\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4\n", + "Average of Other Ratios: 0.2201502732240437\n", + "T-Statistic: -5.295186309462342\n", + "P-Value: 0.013143123507478411\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.15\n", + "Average of Other Ratios: 0.1271186440677966\n", + "T-Statistic: -4.67653718043597\n", + "P-Value: 0.018469641646674036\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 760\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 761\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6666666666666666\n", + "Average of Other Ratios: 0.28217213114754097\n", + "T-Statistic: -16.292961239551758\n", + "P-Value: 0.0005030512239375957\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2542372881355932\n", + "Average of Other Ratios: 0.23022598870056496\n", + "T-Statistic: -2.4285714285714284\n", + "P-Value: 0.13584143478190688\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 761\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 762\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.47540983606557374\n", + "Average of Other Ratios: 0.275\n", + "T-Statistic: -3.8509508464272586\n", + "P-Value: 0.03092048438850596\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2542372881355932\n", + "Average of Other Ratios: 0.1571563088512241\n", + "T-Statistic: -6.947685432148197\n", + "P-Value: 0.020094339591082675\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 762\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 763\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3770491803278688\n", + "Average of Other Ratios: 0.24166666666666664\n", + "T-Statistic: -6.140373651674497\n", + "P-Value: 0.008687594169327536\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2033898305084746\n", + "Average of Other Ratios: 0.12919020715630886\n", + "T-Statistic: -5.005419915905\n", + "P-Value: 0.03767240357329794\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 763\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 764\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6666666666666666\n", + "Average of Other Ratios: 0.17397540983606558\n", + "T-Statistic: -18.4422633103658\n", + "P-Value: 0.0003478966667273698\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.1694915254237288\n", + "Average of Other Ratios: 0.13072033898305085\n", + "T-Statistic: -2.4581295560193186\n", + "P-Value: 0.09101873468865801\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 764\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 765\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Doc  Test  Highest Match Ratio  Average of Other Ratios  T-Statistic          P-Value                Significant\n",
+      "765  1     0.65                 0.4228142076502732       -4.415045976602256   0.02156528848562188    yes\n",
+      "765  2     0.5084745762711864   0.4389830508474576       -4.148687032732543   0.025445896223527123   yes\n",
+      "766  1     0.7049180327868853   0.19999999999999998      -8.806809104164614   0.003084718357015228   yes\n",
+      "766  2     0.2033898305084746   0.14745762711864407      -3.1135770454260725  0.052734401788326495   no\n",
+      "767  1     0.45                 0.20314207650273225      -9.794313683048983   0.002261966026846209   yes\n",
+      "767  2     0.1864406779661017   0.1432909604519774       -3.097057119520469   0.053418165120319126   no\n",
+      "768  1     0.48333333333333334  0.2532103825136612       -5.1768582175628834  0.013989618848905717   yes\n",
+      "768  2     0.23728813559322035  0.15197740112994348      -5.438132165795431   0.012207529595140413   yes\n",
+      "769  1     0.5833333333333334   0.2275273224043716       -5.726067308431419   0.010572065566569503   yes\n",
+      "769  2     0.22033898305084745  0.16871468926553673      -5.51623494926466    0.011732957333623041   yes\n",
+      "770  1     0.639344262295082    0.1708333333333333       -12.10156988574371   0.0012144271996402047  yes\n",
+      "770  2     0.23333333333333334  0.15677966101694915      -2.5809523809523816  0.08171227084668078    no\n",
+      "771  1     0.4098360655737705   0.23333333333333334      -5.951158369984666   0.00948839549552589    yes\n",
+      "771  2     0.23728813559322035  0.1393361581920904       -11.45967949880383   0.001426179984263791   yes\n",
+      "772  1     0.47540983606557374  0.20416666666666666      -4.968520257961496   0.015661100563564314   yes\n",
+      "772  2     0.21666666666666667  0.13983050847457626      -3.156608161329988   0.05100442714599591    no\n",
+      "773  1     0.5333333333333333   0.2527322404371585       -4.948933058520406   0.01583135472899473    yes\n",
+      "773  2     0.23728813559322035  0.20268361581920905      -2.734913303711143   0.07164396808065115    no\n",
+      "774  1     0.47540983606557374  0.25                     -4.9384820758662515  0.015923182580957953   yes\n",
+      "774  2     0.22033898305084745  0.16318267419962337      -2.2651619180423666  0.15174721013539427    no\n",
+      "775  1     0.5833333333333334   0.2739071038251366       -13.930528616417067  0.0008008837712144284  yes\n",
+      "775  2     0.2542372881355932   0.1814265536723164       -3.4427734423525815  0.04115269085326815    yes\n",
+      "776  1     0.6229508196721312   0.15833333333333333      -7.384810731767565   0.005134544759237181   yes\n",
+      "776  2     0.2033898305084746   0.16031073446327682      -5.211581712072246   0.01373401507365159    yes\n",
+      "777  1     0.38333333333333336  0.30273224043715846      -4.0696430409706865  0.026768690892997455   yes\n",
+      "777  2     0.3559322033898305   0.2573446327683616       -5.309251443565037   0.01304699781574515    yes\n",
+      "778  1     0.48333333333333334  0.3855191256830601       -2.636692552502379   0.07787649597226969    no\n",
+      "778  2     0.3898305084745763   0.33709981167608283      -28.00000000000001   0.0012730749910096228  yes\n",
+      "779  1     0.5901639344262295   0.22083333333333333      -8.148412107258412   0.003865377073276304   yes\n",
+      "779  2     0.3220338983050847   0.14759887005649716      -7.540447009277395   0.00483552316841547    yes\n",
+      "780  1     0.45                 0.298224043715847        -2.2807020090731873  0.10686619566510568    no\n",
+      "780  2     0.22033898305084745  0.1561440677966102       -14.299454098945166  0.0007411711769066464  yes\n",
+      "781  1     0.4262295081967213   0.18333333333333332      -4.17817014100824    0.02497413039088068    yes\n",
+      "781  2     0.22033898305084745  0.13481638418079095      -4.017937620623259   0.027682186108019237   yes\n",
+      "782  1     0.5333333333333333   0.27322404371584696      -5.040542454345847   0.015055208208642631   yes\n",
+      "782  2     0.23728813559322035  0.1426553672316384       -3.941176470588237   0.15819200517512308    no\n",
+      "783  1     0.4166666666666667   0.21530054644808744      -4.283002068724982   0.02338543904381472    yes\n",
+      "783  2     0.15254237288135594  0.10988700564971751      -2.589247891896882   0.08112688802619472    no\n",
+      "784  1     0.5081967213114754   0.19583333333333333      -7.410760977286471   0.005083024752688326   yes\n",
+      "784  2     0.21666666666666667  0.13559322033898305      -4.428506143912038   0.021390194677135307   yes\n",
+      "Done 784\n",
+      "Doing 785\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n",
+      "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3442622950819672\n", + "Average of Other Ratios: 0.27499999999999997\n", + "T-Statistic: -4.798632565231811\n", + "P-Value: 0.017221458650986843\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.22033898305084745\n", + "Average of Other Ratios: 0.16864406779661018\n", + "T-Statistic: -3.4776799529026854\n", + "P-Value: 0.040122557696007824\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 785\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 786\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
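[Editor's note: the elided stderr warning comes from re-loading the bert-base-uncased checkpoint into a masked-language-model head before each test. The sketch below shows the kind of load that produces it; this is an assumption about the notebook's setup rather than its exact cell, and the variable names are illustrative.]

```python
# Hedged sketch: a model load of the kind that emits the repeated warning
# "Some weights of the model checkpoint at bert-base-uncased were not used
# when initializing BertForMaskedLM". The pooler and seq_relationship
# weights belong to other heads and are unused by the masked-LM head,
# which is why the warning says this IS expected.
from transformers import BertForMaskedLM, BertTokenizer

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertForMaskedLM.from_pretrained("bert-base-uncased")
model.eval()  # the logs show inference only; no fine-tuning is implied
```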
[Per-document results continue:]
Doc 786: test 1: Highest 0.4333, Others 0.2864, t = -4.1007, p = 0.0262, sig. | test 2: Highest 0.2712, Others 0.1731, t = -7.5020, p = 0.0049, sig.
Doc 787: test 1: Highest 0.4754, Others 0.3167, t = -4.9741, p = 0.0156, sig. | test 2: Highest 0.3729, Others 0.2445, t = -3.7499, p = 0.0331, sig.
Doc 788: test 1: Highest 0.5000, Others 0.1986, t = -6.3006, p = 0.0081, sig. | test 2: Highest 0.2333, Others 0.1271, t = -7.2361, p = 0.0054, sig.
Doc 789: test 1: Highest 0.3500, Others 0.2446, t = -2.2254, p = 0.1125, n.s. | test 2: Highest 0.1864, Others 0.1461, t = -3.4462, p = 0.0749, n.s.
Doc 790: test 1: Highest 0.6230, Others 0.2042, t = -13.3126, p = 0.0009, sig. | test 2: Highest 0.2542, Others 0.1687, t = -4.7296, p = 0.0179, sig.
Doc 791: test 1: Highest 0.6833, Others 0.4937, t = -8.8855, p = 0.0030, sig. | test 2: Highest 0.5254, Others 0.4601, t = -2.3328, p = 0.1019, n.s.
Doc 792: test 1: Highest 0.4590, Others 0.2417, t = -13.6209, p = 0.0009, sig. | test 2: Highest 0.2203, Others 0.1814, t = -4.8549, p = 0.0167, sig.
Doc 793: test 1: Highest 0.4000, Others 0.2615, t = -3.4496, p = 0.0409, sig. | test 2: Highest 0.2542, Others 0.1350, t = -5.9419, p = 0.0272, sig.
Doc 794: test 1: Highest 0.5167, Others 0.2989, t = -3.8376, p = 0.0312, sig. | test 2: Highest 0.2542, Others 0.2152, t = -2.4428, p = 0.0923, n.s.
Doc 795: test 1: Highest 0.4833, Others 0.2693, t = -6.2468, p = 0.0083, sig. | test 2: Highest 0.3833, Others 0.2161, t = -9.5721, p = 0.0024, sig.
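[Editor's note: every stdout block summarized here reports the same comparison, testing a document's highest match ratio against the remaining candidates' ratios and printing a verdict at p < 0.05. A minimal sketch of a check that prints in this format is given below; the choice of scipy.stats.ttest_1samp, the function name report_significance, and the alpha parameter are assumptions for illustration, not the notebook's exact code.]

```python
# Hedged sketch of a significance check behind the logged
# "Highest Match Ratio / Average of Other Ratios / T-Statistic / P-Value"
# lines. A one-sample t-test of the non-best ratios against the highest
# ratio yields negative t-statistics like those logged, because the other
# ratios sit below the highest one.
from scipy.stats import ttest_1samp


def report_significance(match_ratios, alpha=0.05):
    ranked = sorted(match_ratios)
    highest, others = ranked[-1], ranked[:-1]
    t_stat, p_value = ttest_1samp(others, popmean=highest)
    print(f"Highest Match Ratio: {highest}")
    print(f"Average of Other Ratios: {sum(others) / len(others)}")
    print(f"T-Statistic: {t_stat}")
    print(f"P-Value: {p_value}")
    verdict = "" if p_value < alpha else "not "
    print(f"The highest ratio is {verdict}significantly different from the others.")
    return p_value < alpha
```

For inputs like those logged for document 790 (one candidate near 0.62 against others around 0.20), such a test would be expected to give a strongly negative t-statistic and a p-value well below 0.05, matching the "significantly different" verdict in the log.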
[Per-document results continue:]
Doc 796: test 1: Highest 0.4667, Others 0.2655, t = -9.4680, p = 0.0025, sig. | test 2: Highest 0.2712, Others 0.1982, t = -5.9671, p = 0.0094, sig.
Doc 797: test 1: Highest 0.5167, Others 0.1950, t = -6.4129, p = 0.0077, sig. | test 2: Highest 0.1525, Others 0.1224, t = -6.8122, p = 0.0065, sig.
Doc 798: test 1: Highest 0.4667, Others 0.3068, t = -2.6055, p = 0.0800, n.s. | test 2: Highest 0.3898, Others 0.2238, t = -6.7067, p = 0.0068, sig.
Doc 799: test 1: Highest 0.5082, Others 0.2000, t = -6.7523, p = 0.0066, sig. | test 2: Highest 0.2881, Others 0.1728, t = -5.0723, p = 0.0148, sig.
Doc 800: test 1: Highest 0.6230, Others 0.2083, t = -19.7703, p = 0.0003, sig. | test 2: Highest 0.1864, Others 0.1630, t = -3.6087, p = 0.0689, n.s.
Doc 801: test 1: Highest 0.6557, Others 0.4208, t = -10.2363, p = 0.0020, sig. | test 2: Highest 0.5254, Others 0.3841, t = -7.2054, p = 0.0055, sig.
[Processing of document 802 begins; the model-load warning is emitted again:]
+    "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n",
+    "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7049180327868853\n", + "Average of Other Ratios: 0.4541666666666667\n", + "T-Statistic: -4.489735890023653\n", + "P-Value: 0.020616535562147837\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.36563088512241054\n", + "T-Statistic: -2.235697940503432\n", + "P-Value: 0.1548857125971983\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 802\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 803\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.36038251366120216\n", + "T-Statistic: -4.0670388186695\n", + "P-Value: 0.02681376369186907\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.36864406779661013\n", + "T-Statistic: -3.21491849579106\n", + "P-Value: 0.04877283804441102\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 803\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 804\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6833333333333333\n", + "Average of Other Ratios: 0.4560792349726776\n", + "T-Statistic: -5.9382601309311465\n", + "P-Value: 0.009546449129946996\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.4011299435028249\n", + "T-Statistic: -4.050528318752622\n", + "P-Value: 0.027101807358158075\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 804\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 805\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5245901639344263\n", + "Average of Other Ratios: 0.425\n", + "T-Statistic: -2.2720572719623497\n", + "P-Value: 0.10772054769411174\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.36737288135593216\n", + "T-Statistic: -3.6999746626775307\n", + "P-Value: 0.03427858129419643\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 805\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 806\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7049180327868853\n", + "Average of Other Ratios: 0.45\n", + "T-Statistic: -3.6218924086243263\n", + "P-Value: 0.036199162722362586\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.4135593220338983\n", + "T-Statistic: -4.495611895592143\n", + "P-Value: 0.02054420972468061\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 806\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 807\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5666666666666667\n", + "Average of Other Ratios: 0.38545081967213113\n", + "T-Statistic: -5.020124422686412\n", + "P-Value: 0.015223827432133234\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3728813559322034\n", + "Average of Other Ratios: 0.3290960451977401\n", + "T-Statistic: -3.025290226140455\n", + "P-Value: 0.05652149905978702\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 807\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 808\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.4937158469945355\n", + "T-Statistic: -3.4194195863846306\n", + "P-Value: 0.041860729913222816\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.4639830508474576\n", + "T-Statistic: -2.3727172680373334\n", + "P-Value: 0.09826222788434269\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 808\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 809\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5737704918032787\n", + "Average of Other Ratios: 0.5\n", + "T-Statistic: -2.260714065293725\n", + "P-Value: 0.10885413598684734\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.43898305084745765\n", + "T-Statistic: -3.136812146700215\n", + "P-Value: 0.051791252064485205\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 809\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 810\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7\n", + "Average of Other Ratios: 0.3735655737704918\n", + "T-Statistic: -10.454060628342173\n", + "P-Value: 0.0018684937780359754\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4166666666666667\n", + "Average of Other Ratios: 0.35169491525423724\n", + "T-Statistic: -2.252629691622204\n", + "P-Value: 0.10967084489225898\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 810\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 811\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.639344262295082\n", + "Average of Other Ratios: 0.4\n", + "T-Statistic: -9.756144412236056\n", + "P-Value: 0.0022879647959347044\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.4091101694915254\n", + "T-Statistic: -2.4465756114105797\n", + "P-Value: 0.09195951277963867\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 811\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 812\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7377049180327869\n", + "Average of Other Ratios: 0.39583333333333337\n", + "T-Statistic: -7.4590163934426235\n", + "P-Value: 0.004989020225851709\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.4260593220338983\n", + "T-Statistic: -2.3835678188830465\n", + "P-Value: 0.0973040279791015\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 812\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 813\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6333333333333333\n", + "Average of Other Ratios: 0.3608606557377049\n", + "T-Statistic: -12.95979435165334\n", + "P-Value: 0.0009918481137665518\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3728813559322034\n", + "Average of Other Ratios: 0.32902542372881355\n", + "T-Statistic: -6.272727272727274\n", + "P-Value: 0.0081795212436495\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 813\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 814\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.37083333333333335\n", + "T-Statistic: -10.272001058557553\n", + "P-Value: 0.001967356422200276\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3584039548022599\n", + "T-Statistic: -2.8942424583125095\n", + "P-Value: 0.06279568913296162\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 814\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 815\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7\n", + "Average of Other Ratios: 0.435655737704918\n", + "T-Statistic: -6.464633818447615\n", + "P-Value: 0.00750996829937376\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5423728813559322\n", + "Average of Other Ratios: 0.3962570621468927\n", + "T-Statistic: -4.31678224360102\n", + "P-Value: 0.022901451094550856\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 815\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 816\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7166666666666667\n", + "Average of Other Ratios: 0.45211748633879784\n", + "T-Statistic: -15.725247417039386\n", + "P-Value: 0.0005589740150536343\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.438771186440678\n", + "T-Statistic: -3.9134326914916078\n", + "P-Value: 0.029654511991928277\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 816\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 817\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5901639344262295\n", + "Average of Other Ratios: 0.4125\n", + "T-Statistic: -3.7159734017373043\n", + "P-Value: 0.033901269284630456\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3586158192090395\n", + "T-Statistic: -11.057143071059498\n", + "P-Value: 0.0015845269383183932\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 817\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 818\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.40191256830601096\n", + "T-Statistic: -2.7474782254251675\n", + "P-Value: 0.07089198741296619\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3728813559322034\n", + "Average of Other Ratios: 0.3093220338983051\n", + "T-Statistic: -2.0349703424076058\n", + "P-Value: 0.1788266478229615\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 818\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 819\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.45642076502732243\n", + "T-Statistic: -10.118093819799428\n", + "P-Value: 0.0020564110187835205\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.4639124293785311\n", + "T-Statistic: -3.1846701192429343\n", + "P-Value: 0.049914723854652635\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 819\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 820\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.39822404371584696\n", + "T-Statistic: -2.815880143112684\n", + "P-Value: 0.06696608581805782\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.40091807909604515\n", + "T-Statistic: -2.9501131862499728\n", + "P-Value: 0.060019186645291704\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 820\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 821\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.639344262295082\n", + "Average of Other Ratios: 0.48333333333333334\n", + "T-Statistic: -5.404377109955353\n", + "P-Value: 0.012420402409580856\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.41572504708097924\n", + "T-Statistic: -2.4051807590126426\n", + "P-Value: 0.13797203364885854\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 821\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 822\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5573770491803278\n", + "Average of Other Ratios: 0.4083333333333333\n", + "T-Statistic: -6.759987541853552\n", + "P-Value: 0.006613565506268803\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.3145009416195857\n", + "T-Statistic: -9.800000000000015\n", + "P-Value: 0.010252475022698292\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 822\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 823\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6721311475409836\n", + "Average of Other Ratios: 0.4916666666666667\n", + "T-Statistic: -6.965216353380622\n", + "P-Value: 0.006072164935164103\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.42664783427495295\n", + "T-Statistic: -3.434253416143983\n", + "P-Value: 0.07533225481738264\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 823\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 824\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.3817622950819672\n", + "T-Statistic: -6.45646486744287\n", + "P-Value: 0.007536970061691775\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.35847457627118645\n", + "T-Statistic: -3.7468218576747336\n", + "P-Value: 0.03318862977572546\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 824\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 825\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6229508196721312\n", + "Average of Other Ratios: 0.425\n", + "T-Statistic: -18.39984547099852\n", + "P-Value: 0.0003502913010235412\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.48333333333333334\n", + "Average of Other Ratios: 0.4152542372881356\n", + "T-Statistic: -2.677777777777778\n", + "P-Value: 0.07519088494134536\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 825\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 826\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6229508196721312\n", + "Average of Other Ratios: 0.3583333333333334\n", + "T-Statistic: -6.037002655406131\n", + "P-Value: 0.009113578699967435\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.3415960451977401\n", + "T-Statistic: -2.404160372045395\n", + "P-Value: 0.09551661130031602\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 826\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 827\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.37083333333333335\n", + "T-Statistic: -8.311288381099306\n", + "P-Value: 0.003649917396478642\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.3686440677966102\n", + "T-Statistic: -3.7363704002836675\n", + "P-Value: 0.03342790419303777\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 827\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 828\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6229508196721312\n", + "Average of Other Ratios: 0.3833333333333333\n", + "T-Statistic: -5.370455913263936\n", + "P-Value: 0.012639237313128257\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.3855932203389831\n", + "T-Statistic: -3.427505887692899\n", + "P-Value: 0.041613829518252106\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 828\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 829\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.44371584699453553\n", + "T-Statistic: -2.834044674981603\n", + "P-Value: 0.06596933591930301\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.45572033898305087\n", + "T-Statistic: -2.7857691081092364\n", + "P-Value: 0.06866006993419116\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 829\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 830\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6333333333333333\n", + "Average of Other Ratios: 0.5101092896174864\n", + "T-Statistic: -3.547608678177134\n", + "P-Value: 0.038155756341829127\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.4322033898305085\n", + "T-Statistic: -5.013002700820275\n", + "P-Value: 0.015283219713031168\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 830\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 831\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.43558743169398906\n", + "T-Statistic: -3.3200966453052976\n", + "P-Value: 0.045050836488169606\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.41786723163841805\n", + "T-Statistic: -2.9961510064713686\n", + "P-Value: 0.057846064903379533\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 831\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 832\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.4814890710382514\n", + "T-Statistic: -8.921688593482292\n", + "P-Value: 0.0029704891181838475\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.576271186440678\n", + "Average of Other Ratios: 0.384180790960452\n", + "T-Statistic: -4.605541138054455\n", + "P-Value: 0.01924977204112997\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 832\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 833\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7049180327868853\n", + "Average of Other Ratios: 0.35833333333333334\n", + "T-Statistic: -8.672148663324567\n", + "P-Value: 0.003226136549403669\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.3423728813559322\n", + "T-Statistic: -1.7039616353344613\n", + "P-Value: 0.2305022123029592\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 852\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 853\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5901639344262295\n", + "Average of Other Ratios: 0.3625\n", + "T-Statistic: -4.761760610530732\n", + "P-Value: 0.01758652689930633\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.32902542372881355\n", + "T-Statistic: -6.562385865517576\n", + "P-Value: 0.007196507966961928\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 853\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 854\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.3647540983606557\n", + "T-Statistic: -7.531019558041822\n", + "P-Value: 0.017178611116784117\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3251412429378531\n", + "T-Statistic: -5.210719601593815\n", + "P-Value: 0.013740287056139356\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 854\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 855\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5081967213114754\n", + "Average of Other Ratios: 0.4083333333333333\n", + "T-Statistic: -3.1652931763476935\n", + "P-Value: 0.05066398901162397\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.32627118644067793\n", + "T-Statistic: -2.0939473213563398\n", + "P-Value: 0.17129801016782423\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 855\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 856\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.3625\n", + "T-Statistic: -6.508196721311476\n", + "P-Value: 0.007368102187221459\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.32937853107344633\n", + "T-Statistic: -3.048964780184798\n", + "P-Value: 0.05547328824097628\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 856\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 857\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.4562841530054645\n", + "T-Statistic: -2.5241076101780897\n", + "P-Value: 0.08586662930511714\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.39661016949152544\n", + "T-Statistic: -7.7424126813571705\n", + "P-Value: 0.004480807680029333\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 857\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 858\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7049180327868853\n", + "Average of Other Ratios: 0.42916666666666664\n", + "T-Statistic: -5.888028951637414\n", + "P-Value: 0.00977702506049218\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.37608286252354045\n", + "T-Statistic: -1.5525897646537226\n", + "P-Value: 0.2607163805465733\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 858\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 859\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.4227459016393443\n", + "T-Statistic: -4.090863905829874\n", + "P-Value: 0.02640502342776308\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.3336864406779661\n", + "T-Statistic: -3.1952478845632513\n", + "P-Value: 0.04951160636548708\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 859\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 860\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6333333333333333\n", + "Average of Other Ratios: 0.38504098360655736\n", + "T-Statistic: -3.331454908237819\n", + "P-Value: 0.044670745752969254\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.3458333333333333\n", + "T-Statistic: -9.810579653543469\n", + "P-Value: 0.002251005507538249\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 860\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 861\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.4189207650273224\n", + "T-Statistic: -2.2659856668962592\n", + "P-Value: 0.10832553396730833\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.36257062146892655\n", + "T-Statistic: -4.233384268372494\n", + "P-Value: 0.0241205868939632\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 861\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 862\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6557377049180327\n", + "Average of Other Ratios: 0.4125\n", + "T-Statistic: -8.339578454332552\n", + "P-Value: 0.0036141243204565873\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.4009180790960452\n", + "T-Statistic: -4.384410769545493\n", + "P-Value: 0.02197077351423448\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 862\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 863\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5737704918032787\n", + "Average of Other Ratios: 0.4083333333333333\n", + "T-Statistic: -2.501174736689476\n", + "P-Value: 0.08761587052799776\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4\n", + "Average of Other Ratios: 0.3220338983050847\n", + "T-Statistic: -5.039047529047533\n", + "P-Value: 0.015067470993325742\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 863\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 864\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7166666666666667\n", + "Average of Other Ratios: 0.40669398907103826\n", + "T-Statistic: -7.514384861135337\n", + "P-Value: 0.004883963889775299\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.37959039548022605\n", + "T-Statistic: -3.066783460464022\n", + "P-Value: 0.05470043251723667\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 864\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 865\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6333333333333333\n", + "Average of Other Ratios: 0.3781420765027323\n", + "T-Statistic: -5.634742497670123\n", + "P-Value: 0.011057936145282518\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.42372881355932207\n", + "T-Statistic: -2.84604989415154\n", + "P-Value: 0.06532071006198013\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 865\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 866\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.3776639344262295\n", + "T-Statistic: -4.858609077032151\n", + "P-Value: 0.01664841190941072\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.3458333333333333\n", + "T-Statistic: -3.64966532468076\n", + "P-Value: 0.03550063326417129\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 866\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 867\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5573770491803278\n", + "Average of Other Ratios: 0.4625\n", + "T-Statistic: -2.1172312753822844\n", + "P-Value: 0.12450912024159524\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.358545197740113\n", + "T-Statistic: -3.7292186611760196\n", + "P-Value: 0.033592909525737194\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 867\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 868\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6557377049180327\n", + "Average of Other Ratios: 0.43750000000000006\n", + "T-Statistic: -9.957781870946098\n", + "P-Value: 0.002154944280259222\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.3461864406779661\n", + "T-Statistic: -3.667546560934897\n", + "P-Value: 0.03505999613164613\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 868\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 869\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6557377049180327\n", + "Average of Other Ratios: 0.37916666666666665\n", + "T-Statistic: -7.6307221538215995\n", + "P-Value: 0.004672553983451911\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.34173728813559323\n", + "T-Statistic: -3.266799462776711\n", + "P-Value: 0.046890135550055285\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 869\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 870\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.4523224043715847\n", + "T-Statistic: -6.124375580236181\n", + "P-Value: 0.00875178616778322\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5423728813559322\n", + "Average of Other Ratios: 0.40508474576271186\n", + "T-Statistic: -5.713299488454646\n", + "P-Value: 0.010638289400887548\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 870\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 871\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.34795081967213115\n", + "T-Statistic: -2.822931024840667\n", + "P-Value: 0.06657696557341944\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.2995056497175141\n", + "T-Statistic: -4.670840095116222\n", + "P-Value: 0.018530711133626115\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 871\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 872\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7704918032786885\n", + "Average of Other Ratios: 0.3208333333333333\n", + "T-Statistic: -14.29409263370358\n", + "P-Value: 0.0007419958228994995\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4166666666666667\n", + "Average of Other Ratios: 0.3644067796610169\n", + "T-Statistic: -3.5603266600026933\n", + "P-Value: 0.03781136121703687\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 872\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 873\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6557377049180327\n", + "Average of Other Ratios: 0.3416666666666667\n", + "T-Statistic: -11.034070169059069\n", + "P-Value: 0.0015942956657695156\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3389830508474576\n", + "Average of Other Ratios: 0.295409604519774\n", + "T-Statistic: -3.8525647377658387\n", + "P-Value: 0.030886920710077346\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 873\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 874\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.4352459016393443\n", + "T-Statistic: -2.8995067504928964\n", + "P-Value: 0.06252731441759059\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.3878531073446328\n", + "T-Statistic: -2.7019645629286955\n", + "P-Value: 0.07366341268340729\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 874\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 875\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.3688524590163934\n", + "T-Statistic: -4.890622801652107\n", + "P-Value: 0.016352674036444928\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.43333333333333335\n", + "Average of Other Ratios: 0.3516949152542373\n", + "T-Statistic: -2.830478177733986\n", + "P-Value: 0.06616357500554147\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 875\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 876\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.36666666666666664\n", + "T-Statistic: -4.753997634988252\n", + "P-Value: 0.01766466674069792\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.36666666666666664\n", + "Average of Other Ratios: 0.32627118644067793\n", + "T-Statistic: -2.1497076400880912\n", + "P-Value: 0.12074123791130136\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 876\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 877\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5409836065573771\n", + "Average of Other Ratios: 0.4375\n", + "T-Statistic: -12.970212234731923\n", + "P-Value: 0.0009894935210338785\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.36285310734463283\n", + "T-Statistic: -3.674885279264579\n", + "P-Value: 0.03488117727992117\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 877\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 878\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5245901639344263\n", + "Average of Other Ratios: 0.32916666666666666\n", + "T-Statistic: -5.592518411047395\n", + "P-Value: 0.011292471025751814\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.27415254237288134\n", + "T-Statistic: -8.239062446111248\n", + "P-Value: 0.0037434491451463214\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 878\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 879\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4918032786885246\n", + "Average of Other Ratios: 0.39583333333333337\n", + "T-Statistic: -4.8738225751540805\n", + "P-Value: 0.016507011615195296\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.32916666666666666\n", + "T-Statistic: -28.126664663270653\n", + "P-Value: 9.866049174120721e-05\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 879\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 880\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5081967213114754\n", + "Average of Other Ratios: 0.43333333333333335\n", + "T-Statistic: -3.1761845581166224\n", + "P-Value: 0.050241108410324004\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.43333333333333335\n", + "Average of Other Ratios: 0.3347457627118644\n", + "T-Statistic: -2.188743887105837\n", + "P-Value: 0.11639121058222453\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 880\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 881\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6557377049180327\n", + "Average of Other Ratios: 0.43749999999999994\n", + "T-Statistic: -3.491803278688525\n", + "P-Value: 0.039715098203703336\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5423728813559322\n", + "Average of Other Ratios: 0.4218220338983051\n", + "T-Statistic: -11.810674379463547\n", + "P-Value: 0.0013048196771810819\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 881\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 882\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5081967213114754\n", + "Average of Other Ratios: 0.375\n", + "T-Statistic: -3.0387597258755705\n", + "P-Value: 0.05592210337011148\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3559322033898305\n", + "Average of Other Ratios: 0.3093220338983051\n", + "T-Statistic: -1.5714285714285712\n", + "P-Value: 0.25668888376056537\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 882\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 883\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7377049180327869\n", + "Average of Other Ratios: 0.4458333333333333\n", + "T-Statistic: -8.515608096333422\n", + "P-Value: 0.0034015195584640207\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.47662429378531074\n", + "T-Statistic: -2.224165710326518\n", + "P-Value: 0.11260576823386355\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 883\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 884\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6666666666666666\n", + "Average of Other Ratios: 0.45170765027322407\n", + "T-Statistic: -4.285643375079597\n", + "P-Value: 0.02334712177086421\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.559322033898305\n", + "Average of Other Ratios: 0.396680790960452\n", + "T-Statistic: -4.238952655416746\n", + "P-Value: 0.024036610732601344\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 884\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 885\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.48333333333333334\n", + "Average of Other Ratios: 0.4147540983606558\n", + "T-Statistic: -2.7228776982622653\n", + "P-Value: 0.07237357811230395\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3728813559322034\n", + "Average of Other Ratios: 0.3206920903954802\n", + "T-Statistic: -3.753591024171325\n", + "P-Value: 0.03303482712591477\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 885\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 886\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.65\n", + "Average of Other Ratios: 0.46509562841530055\n", + "T-Statistic: -5.141289506242194\n", + "P-Value: 0.014257918214039442\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.40946327683615824\n", + "T-Statistic: -3.475538999555497\n", + "P-Value: 0.04018478833317944\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 886\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 887\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.47540983606557374\n", + "Average of Other Ratios: 0.4208333333333334\n", + "T-Statistic: -2.4902246368970027\n", + "P-Value: 0.08846651702809234\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.379590395480226\n", + "T-Statistic: -3.6545153763584652\n", + "P-Value: 0.03538041968158101\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 887\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 888\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5737704918032787\n", + "Average of Other Ratios: 0.43333333333333335\n", + "T-Statistic: -2.8622478469687973\n", + "P-Value: 0.06445811513039201\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.40494350282485875\n", + "T-Statistic: -2.9327458060449008\n", + "P-Value: 0.06086550016185654\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 888\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 889\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6333333333333333\n", + "Average of Other Ratios: 0.45157103825136613\n", + "T-Statistic: -2.989953698734684\n", + "P-Value: 0.05813278125064246\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.43898305084745765\n", + "T-Statistic: -2.551058736525074\n", + "P-Value: 0.08386536725088448\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 889\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 890\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5573770491803278\n", + "Average of Other Ratios: 0.3833333333333333\n", + "T-Statistic: -3.057286949843683\n", + "P-Value: 0.05511063014547841\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4\n", + "Average of Other Ratios: 0.35593220338983056\n", + "T-Statistic: -2.84815729902686\n", + "P-Value: 0.06520767196759084\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 890\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 891\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.4166666666666667\n", + "T-Statistic: -10.548278185933164\n", + "P-Value: 0.0018199105302834477\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.31645480225988704\n", + "T-Statistic: -39.57411910381416\n", + "P-Value: 3.5500916860886294e-05\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 891\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 892\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.49337431693989064\n", + "T-Statistic: -2.7692008371142\n", + "P-Value: 0.0696149257313345\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.4260593220338983\n", + "T-Statistic: -8.799079181288127\n", + "P-Value: 0.0030926117525078255\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 892\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 893\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.3858606557377049\n", + "T-Statistic: -3.299959459423778\n", + "P-Value: 0.0457348408632345\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3880649717514124\n", + "T-Statistic: -2.6501717516402645\n", + "P-Value: 0.07698251884827165\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 893\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 894\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5901639344262295\n", + "Average of Other Ratios: 0.4625\n", + "T-Statistic: -3.800342933561155\n", + "P-Value: 0.031997130689119066\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.41793785310734466\n", + "T-Statistic: -3.015997915032065\n", + "P-Value: 0.05693971913913344\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 894\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 895\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.3866120218579235\n", + "T-Statistic: -1.7856892936312567\n", + "P-Value: 0.21607017204333234\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.37711864406779666\n", + "T-Statistic: -3.8784935044945925\n", + "P-Value: 0.030354042786245917\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 895\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 896\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5737704918032787\n", + "Average of Other Ratios: 0.4916666666666667\n", + "T-Statistic: -3.723879479976154\n", + "P-Value: 0.0337167759198274\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.41619585687382293\n", + "T-Statistic: -1.4090909090909103\n", + "P-Value: 0.29417736761437996\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 896\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 897\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.43948087431693994\n", + "T-Statistic: -2.3754283562972685\n", + "P-Value: 0.09802174273100749\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.39830508474576276\n", + "T-Statistic: -9.295160030897799\n", + "P-Value: 0.0026356894602544566\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 897\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 898\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.37725409836065577\n", + "T-Statistic: -3.8749319798464548\n", + "P-Value: 0.03042653470881461\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.38333333333333336\n", + "Average of Other Ratios: 0.3305084745762712\n", + "T-Statistic: -3.5988166779486694\n", + "P-Value: 0.03679295873270534\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 898\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 899\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.639344262295082\n", + "Average of Other Ratios: 0.4\n", + "T-Statistic: -13.295383727881227\n", + "P-Value: 0.0009195881666894832\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4166666666666667\n", + "Average of Other Ratios: 0.3644067796610169\n", + "T-Statistic: -1.628834176702405\n", + "P-Value: 0.20183517132355724\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 899\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 900\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.65\n", + "Average of Other Ratios: 0.45266393442622954\n", + "T-Statistic: -5.549004690060408\n", + "P-Value: 0.011541028000440172\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.43848870056497175\n", + "T-Statistic: -3.2975199061269667\n", + "P-Value: 0.04581859729168717\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 921\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 922\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.4666666666666667\n", + "T-Statistic: -4.2869837090014355\n", + "P-Value: 0.02332770866411145\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.4009180790960452\n", + "T-Statistic: -4.901036565932749\n", + "P-Value: 0.016257948615872354\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 922\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 923\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5573770491803278\n", + "Average of Other Ratios: 0.4291666666666667\n", + "T-Statistic: -3.7406497535262835\n", + "P-Value: 0.03332966691975382\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.3584039548022599\n", + "T-Statistic: -3.1383427887418938\n", + "P-Value: 0.051729870414491945\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 923\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 924\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7333333333333333\n", + "Average of Other Ratios: 0.5189890710382514\n", + "T-Statistic: -7.795621704107285\n", + "P-Value: 0.004393108665011521\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.559322033898305\n", + "Average of Other Ratios: 0.5021892655367232\n", + "T-Statistic: -6.111799193237621\n", + "P-Value: 0.008802686470096136\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 924\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 925\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.3902322404371585\n", + "T-Statistic: -7.19301070709552\n", + "P-Value: 0.005537540195779914\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.3416666666666667\n", + "T-Statistic: -6.884772011368271\n", + "P-Value: 0.00627716185131719\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 925\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 926\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.360724043715847\n", + "T-Statistic: -7.579568529572813\n", + "P-Value: 0.004763992639250445\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.3459039548022599\n", + "T-Statistic: -2.5149332127933626\n", + "P-Value: 0.08656123080121365\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 926\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 927\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7333333333333333\n", + "Average of Other Ratios: 0.3858606557377049\n", + "T-Statistic: -14.089106567555575\n", + "P-Value: 0.0007744612617815288\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.39668079096045206\n", + "T-Statistic: -3.806413998599051\n", + "P-Value: 0.03186546090943339\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 927\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 928\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
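The warning that precedes every run comes from loading the pre-trained bert-base-uncased checkpoint into a masked-language-model head: BertForMaskedLM has no pooler and no next-sentence-prediction head, so those checkpoint weights are discarded each time the model is re-instantiated inside the loop. Below is a minimal sketch of that load; the transformers logging call that silences the repeated message is an optional addition for illustration, not something taken from the notebook.

```python
# Minimal sketch of the model load that triggers the repeated
# "Some weights of the model checkpoint at bert-base-uncased were not used" warning.
# The set_verbosity_error() call is an optional extra that hides the message;
# it is not taken from the notebook itself.
from transformers import BertForMaskedLM, BertTokenizer, logging

logging.set_verbosity_error()  # suppress the per-iteration weight-initialization warnings

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
# BertForMaskedLM keeps only the encoder and the MLM head, so the checkpoint's
# bert.pooler.* and cls.seq_relationship.* weights are dropped -- hence the warning.
model = BertForMaskedLM.from_pretrained("bert-base-uncased")
model.eval()
```

Loading the model and tokenizer once, outside the per-document loop, would also avoid both the repeated warning and the repeated initialization cost.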
Documents 928-937, same format (the same weight-initialization warning precedes every run):

Doc 928: run 1: highest 0.557, others 0.404, t = -2.53, p = 0.0856 (not significant); run 2: highest 0.458, others 0.342, t = -4.21, p = 0.0245 (significant)
Doc 929: run 1: highest 0.738, others 0.388, t = -12.0, p = 0.00124 (significant); run 2: highest 0.508, others 0.338, t = -4.41, p = 0.0216 (significant)
Doc 930: run 1: highest 0.533, others 0.423, t = -3.35, p = 0.0441 (significant); run 2: highest 0.450, others 0.373, t = -2.23, p = 0.112 (not significant)
Doc 931: run 1: highest 0.754, others 0.350, t = -16.5, p = 0.000487 (significant); run 2: highest 0.417, others 0.347, t = -3.09, p = 0.0539 (not significant)
Doc 932: run 1: highest 0.705, others 0.388, t = -39.8, p = 3.49e-05 (significant); run 2: highest 0.450, others 0.373, t = -3.72, p = 0.0339 (significant)
Doc 933: run 1: highest 0.705, others 0.383, t = -27.3, p = 0.000108 (significant); run 2: highest 0.458, others 0.363, t = -3.53, p = 0.0387 (significant)
Doc 934: run 1: highest 0.583, others 0.361, t = -4.47, p = 0.0208 (significant); run 2: highest 0.475, others 0.363, t = -5.72, p = 0.0106 (significant)
Doc 935: run 1: highest 0.541, others 0.421, t = -6.50, p = 0.00739 (significant); run 2: highest 0.450, others 0.292, t = -10.9, p = 0.00166 (significant)
Doc 936: run 1: highest 0.700, others 0.365, t = -7.57, p = 0.00478 (significant); run 2: highest 0.390, others 0.342, t = -17.5, p = 0.000409 (significant)
Doc 937: run 1: highest 0.672, others 0.425, t = -4.36, p = 0.0223 (significant); run 2: highest 0.517, others 0.428, t = -4.43, p = 0.0214 (significant)

Processing of document 938 then begins with the same pair of weight-initialization warnings.
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.4153688524590164\n", + "T-Statistic: -3.1703304743958\n", + "P-Value: 0.05046784910179293\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.33319209039548026\n", + "T-Statistic: -1.7611959878594554\n", + "P-Value: 0.17642304434957817\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 938\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 939\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.48333333333333334\n", + "Average of Other Ratios: 0.3284153005464481\n", + "T-Statistic: -3.071040072795025\n", + "P-Value: 0.05451781694215867\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4166666666666667\n", + "Average of Other Ratios: 0.2966101694915254\n", + "T-Statistic: -4.147575310031268\n", + "P-Value: 0.025463909186015793\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 939\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 940\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5901639344262295\n", + "Average of Other Ratios: 0.4375\n", + "T-Statistic: -5.097326714872012\n", + "P-Value: 0.014598887274167346\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.4387005649717514\n", + "T-Statistic: -6.879715746378599\n", + "P-Value: 0.006290349260396344\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 940\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 941\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.43333333333333335\n", + "Average of Other Ratios: 0.39781420765027325\n", + "T-Statistic: -16.249999999999922\n", + "P-Value: 0.0037656052779771634\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.3545197740112994\n", + "T-Statistic: -10.777777777777775\n", + "P-Value: 0.0017083811519259897\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 941\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 942\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.35947176684881604\n", + "T-Statistic: -3.9829601606847738\n", + "P-Value: 0.05763976433444822\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.32895480225988705\n", + "T-Statistic: -4.070774182545795\n", + "P-Value: 0.026749143973066495\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 942\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 943\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.4313524590163934\n", + "T-Statistic: -1.9917554271299593\n", + "P-Value: 0.14044440711390466\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.48333333333333334\n", + "Average of Other Ratios: 0.40254237288135597\n", + "T-Statistic: -2.414985892866683\n", + "P-Value: 0.09459301523161902\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 943\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 944\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5666666666666667\n", + "Average of Other Ratios: 0.41905737704918034\n", + "T-Statistic: -7.182256405781872\n", + "P-Value: 0.005561352568453646\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.4221751412429378\n", + "T-Statistic: -3.7480916030534375\n", + "P-Value: 0.03315971002355427\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 944\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 945\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.3982923497267759\n", + "T-Statistic: -5.599507245339936\n", + "P-Value: 0.011253204715190618\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.37139830508474575\n", + "T-Statistic: -6.762389833500976\n", + "P-Value: 0.006606867955287357\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 945\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 946\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.39829234972677596\n", + "T-Statistic: -4.723689577243244\n", + "P-Value: 0.017974094449026225\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.33742937853107347\n", + "T-Statistic: -4.058782494615296\n", + "P-Value: 0.02695730866033387\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 946\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 947\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5901639344262295\n", + "Average of Other Ratios: 0.38333333333333336\n", + "T-Statistic: -4.2565386541927595\n", + "P-Value: 0.02377386602678355\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.3629943502824859\n", + "T-Statistic: -2.8181818181818175\n", + "P-Value: 0.06683875138704513\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 947\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 948\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6229508196721312\n", + "Average of Other Ratios: 0.42499999999999993\n", + "T-Statistic: -3.2030019695384855\n", + "P-Value: 0.04921870916506516\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.4152542372881356\n", + "T-Statistic: -2.558118456493597\n", + "P-Value: 0.08335065260117518\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 948\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 949\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45901639344262296\n", + "Average of Other Ratios: 0.3875\n", + "T-Statistic: -1.7671941136183904\n", + "P-Value: 0.1753608911595616\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.2911016949152542\n", + "T-Statistic: -2.67070108252106\n", + "P-Value: 0.07564520129729006\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 949\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 950\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6721311475409836\n", + "Average of Other Ratios: 0.42500000000000004\n", + "T-Statistic: -5.444705021331377\n", + "P-Value: 0.01216663456000868\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.37139830508474575\n", + "T-Statistic: -5.336986813075771\n", + "P-Value: 0.012860119949256482\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 950\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 951\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
[Condensed notebook output for documents 951–972. Before each evaluation the notebook reloads `bert-base-uncased` as a `BertForMaskedLM`, and Hugging Face prints the same initialization notice twice per document ("Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight'] — This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture."). Those repeated notices are omitted below; only the per-run statistics are kept. Each document is evaluated twice, and each run reports the highest match ratio, the average of the other candidates' ratios, and a t-test comparing the highest ratio against the others; the printed verdicts correspond to a p < 0.05 threshold.]

| Doc | Run | Highest ratio | Avg. of others | t-statistic | p-value | Significant at 0.05? |
|----:|----:|--------------:|---------------:|------------:|--------:|:---------------------|
| 951 | 1 | 0.5574 | 0.3375 | -4.230 | 0.0242 | yes |
| 951 | 2 | 0.3220 | 0.2701 | -4.975 | 0.0156 | yes |
| 952 | 1 | 0.6500 | 0.4809 | -2.870 | 0.0640 | no |
| 952 | 2 | 0.5254 | 0.4683 | -4.884 | 0.0164 | yes |
| 953 | 1 | 0.6393 | 0.3958 | -8.131 | 0.0039 | yes |
| 953 | 2 | 0.4576 | 0.3499 | -2.370 | 0.0985 | no |
| 954 | 1 | 0.5902 | 0.4542 | -4.048 | 0.0271 | yes |
| 954 | 2 | 0.5254 | 0.3927 | -3.301 | 0.0457 | yes |
| 955 | 1 | 0.5667 | 0.4520 | -2.772 | 0.0695 | no |
| 955 | 2 | 0.5500 | 0.4322 | -4.815 | 0.0171 | yes |
| 956 | 1 | 0.6333 | 0.4563 | -6.639 | 0.0070 | yes |
| 956 | 2 | 0.5593 | 0.4262 | -4.052 | 0.0271 | yes |
| 957 | 1 | 0.6000 | 0.3402 | -5.379 | 0.0126 | yes |
| 957 | 2 | 0.4167 | 0.3517 | -2.915 | 0.0617 | no |
| 958 | 1 | 0.6557 | 0.4125 | -3.218 | 0.0486 | yes |
| 958 | 2 | 0.4915 | 0.3756 | -10.563 | 0.0018 | yes |
| 959 | 1 | 0.5833 | 0.3691 | -6.078 | 0.0089 | yes |
| 959 | 2 | 0.4237 | 0.2911 | -10.742 | 0.0017 | yes |
| 960 | 1 | 0.6230 | 0.3792 | -11.123 | 0.0016 | yes |
| 960 | 2 | 0.4667 | 0.3686 | -2.812 | 0.0672 | no |
| 961 | 1 | 0.6557 | 0.3875 | -8.527 | 0.0034 | yes |
| 961 | 2 | 0.4915 | 0.3757 | -5.009 | 0.0153 | yes |
| 962 | 1 | 0.6167 | 0.4227 | -3.859 | 0.0308 | yes |
| 962 | 2 | 0.4915 | 0.3754 | -3.338 | 0.0444 | yes |
| 963 | 1 | 0.5667 | 0.4936 | -2.509 | 0.0870 | no |
| 963 | 2 | 0.4746 | 0.4303 | -2.891 | 0.0630 | no |
| 964 | 1 | 0.7377 | 0.3958 | -21.672 | 0.0002 | yes |
| 964 | 2 | 0.4500 | 0.3983 | -3.186 | 0.0499 | yes |
| 965 | 1 | 0.5667 | 0.4441 | -4.115 | 0.0260 | yes |
| 965 | 2 | 0.5085 | 0.3587 | -5.328 | 0.0129 | yes |
| 966 | 1 | 0.7000 | 0.3152 | -27.095 | 0.0001 | yes |
| 966 | 2 | 0.4407 | 0.3121 | -3.533 | 0.0386 | yes |
| 967 | 1 | 0.8197 | 0.3750 | -9.797 | 0.0023 | yes |
| 967 | 2 | 0.4407 | 0.3609 | -1.430 | 0.3884 | no |
| 968 | 1 | 0.4918 | 0.3250 | -4.953 | 0.0158 | yes |
| 968 | 2 | 0.4068 | 0.2956 | -7.084 | 0.0058 | yes |
| 969 | 1 | 0.6557 | 0.4083 | -7.346 | 0.0052 | yes |
| 969 | 2 | 0.4746 | 0.3988 | -5.717 | 0.0293 | yes |
| 970 | 1 | 0.4500 | 0.3920 | -1.259 | 0.3350 | no |
| 970 | 2 | 0.4068 | 0.3289 | -2.288 | 0.1062 | no |
| 971 | 1 | 0.6393 | 0.4125 | -6.492 | 0.0074 | yes |
| 971 | 2 | 0.4746 | 0.4263 | -3.519 | 0.0389 | yes |

[Processing of document 972 begins at this point, preceded by the same model-initialization notices.]
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6557377049180327\n", + "Average of Other Ratios: 0.39583333333333337\n", + "T-Statistic: -16.47597884542129\n", + "P-Value: 0.0004866171022863723\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.3332627118644068\n", + "T-Statistic: -7.238793509124532\n", + "P-Value: 0.005437657782298041\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 972\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 973\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6833333333333333\n", + "Average of Other Ratios: 0.4605874316939891\n", + "T-Statistic: -9.319488874116459\n", + "P-Value: 0.002615651895897316\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.4092514124293785\n", + "T-Statistic: -6.2886655641721925\n", + "P-Value: 0.008120996607005396\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 973\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 974\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6666666666666666\n", + "Average of Other Ratios: 0.37725409836065577\n", + "T-Statistic: -9.746825292040107\n", + "P-Value: 0.0022943725355199732\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.48333333333333334\n", + "Average of Other Ratios: 0.4025423728813559\n", + "T-Statistic: -2.8010960513215215\n", + "P-Value: 0.06779122416384425\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 974\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 975\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.4519125683060109\n", + "T-Statistic: -4.467667765204196\n", + "P-Value: 0.020891129917487965\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.40473163841807913\n", + "T-Statistic: -3.46978940590354\n", + "P-Value: 0.040352519825929224\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 975\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 976\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.639344262295082\n", + "Average of Other Ratios: 0.3833333333333333\n", + "T-Statistic: -3.8203179931250597\n", + "P-Value: 0.03156651788894358\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.3502824858757062\n", + "T-Statistic: -4.196397990844169\n", + "P-Value: 0.024688099628686736\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 976\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 977\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7540983606557377\n", + "Average of Other Ratios: 0.41250000000000003\n", + "T-Statistic: -6.306431273644387\n", + "P-Value: 0.008056408388901619\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.417725988700565\n", + "T-Statistic: -7.10832364958241\n", + "P-Value: 0.005728765340993184\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 977\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 978\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7166666666666667\n", + "Average of Other Ratios: 0.4853142076502732\n", + "T-Statistic: -5.664156791225322\n", + "P-Value: 0.010898310561203092\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.46836158192090394\n", + "T-Statistic: -9.941988596598865\n", + "P-Value: 0.0021649872660866308\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 978\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 979\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.35239071038251363\n", + "T-Statistic: -10.603077839669773\n", + "P-Value: 0.001792421776010322\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.2995762711864407\n", + "T-Statistic: -10.04928647585347\n", + "P-Value: 0.00209795503470734\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 979\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 980\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.42288251366120216\n", + "T-Statistic: -2.792068808043707\n", + "P-Value: 0.06830128777723389\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.3220338983050848\n", + "T-Statistic: -3.6987295116025987\n", + "P-Value: 0.03430817170101989\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 980\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 981\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5573770491803278\n", + "Average of Other Ratios: 0.3666666666666667\n", + "T-Statistic: -6.797930999977556\n", + "P-Value: 0.00650881472279479\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.33312146892655364\n", + "T-Statistic: -2.347903032601037\n", + "P-Value: 0.1004970270572768\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 981\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 982\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5573770491803278\n", + "Average of Other Ratios: 0.4416666666666667\n", + "T-Statistic: -2.8952739132187233\n", + "P-Value: 0.06274299207471501\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.3836864406779661\n", + "T-Statistic: -3.485462485693465\n", + "P-Value: 0.03989737572645941\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 982\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 983\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.4230191256830601\n", + "T-Statistic: -3.5968393291702223\n", + "P-Value: 0.03684441767090311\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.3925141242937853\n", + "T-Statistic: -3.5865984774856337\n", + "P-Value: 0.03711240322472348\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 983\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 984\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.36495901639344264\n", + "T-Statistic: -8.629044905647506\n", + "P-Value: 0.003273211490827028\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.36666666666666664\n", + "Average of Other Ratios: 0.326271186440678\n", + "T-Statistic: -3.788162541206022\n", + "P-Value: 0.03226341112647686\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 984\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 985\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5245901639344263\n", + "Average of Other Ratios: 0.4041666666666667\n", + "T-Statistic: -2.9757050833898977\n", + "P-Value: 0.05879878128106479\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5423728813559322\n", + "Average of Other Ratios: 0.3923728813559322\n", + "T-Statistic: -20.347964212104664\n", + "P-Value: 0.00025950431091567957\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 985\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 986\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
[stdout] Per-document detection statistics for documents 985-1005 (two runs per document, listed in the order printed; "significant" corresponds to a printed p-value below 0.05):

| Document | Run | Highest Match Ratio | Average of Other Ratios | T-Statistic | P-Value | Verdict (p < 0.05) |
|---|---|---|---|---|---|---|
| 985 | 1 | 0.5245901639344263 | 0.4041666666666667 | -2.9757050833898977 | 0.05879878128106479 | not significant |
| 985 | 2 | 0.5423728813559322 | 0.3923728813559322 | -20.347964212104664 | 0.00025950431091567957 | significant |
| 986 | 1 | 0.7377049180327869 | 0.4666666666666667 | -15.055959655317555 | 0.0006360495232655051 | significant |
| 986 | 2 | 0.4915254237288136 | 0.4134887005649717 | -3.901282639398381 | 0.029895400966674732 | significant |
| 987 | 1 | 0.5737704918032787 | 0.42083333333333334 | -3.1359127797177497 | 0.05182736123156866 | not significant |
| 987 | 2 | 0.5166666666666667 | 0.3983050847457627 | -5.867173552077365 | 0.009874902457062155 | significant |
| 988 | 1 | 0.7377049180327869 | 0.36666666666666664 | -12.510329824209249 | 0.0011009408982364437 | significant |
| 988 | 2 | 0.38333333333333336 | 0.3220338983050848 | -2.4570411640937726 | 0.09110685313385442 | not significant |
| 989 | 1 | 0.65 | 0.4400273224043716 | -4.731321598151042 | 0.017895516622468247 | significant |
| 989 | 2 | 0.45 | 0.40254237288135597 | -3.2790242451070704 | 0.04645996816866966 | significant |
| 990 | 1 | 0.6721311475409836 | 0.41666666666666663 | -8.004718441897252 | 0.004069631315123374 | significant |
| 990 | 2 | 0.559322033898305 | 0.38418079096045193 | -4.595745763298827 | 0.019360742557531358 | significant |
| 991 | 1 | 0.6885245901639344 | 0.37083333333333335 | -10.34387657107953 | 0.0019275165468191863 | significant |
| 991 | 2 | 0.48333333333333334 | 0.3940677966101695 | -4.750402896977879 | 0.017701003201901513 | significant |
| 992 | 1 | 0.55 | 0.4271174863387978 | -4.102891976310035 | 0.026201720340923294 | significant |
| 992 | 2 | 0.5084745762711864 | 0.363135593220339 | -4.5211227964641365 | 0.02023399027426734 | significant |
| 993 | 1 | 0.5166666666666667 | 0.4565573770491803 | -3.871284033511624 | 0.030501016317830993 | significant |
| 993 | 2 | 0.4915254237288136 | 0.4175141242937853 | -2.8647998103065557 | 0.06432351407478958 | not significant |
| 994 | 1 | 0.6229508196721312 | 0.375 | -7.362230013436795 | 0.005179936955568627 | significant |
| 994 | 2 | 0.5084745762711864 | 0.3798022598870056 | -14.122758056462086 | 0.0007690037706351588 | significant |
| 995 | 1 | 0.5081967213114754 | 0.4041666666666667 | -5.629951970401516 | 0.01108422263908312 | significant |
| 995 | 2 | 0.423728813559322 | 0.33764124293785314 | -3.4045365361503497 | 0.042320045665658175 | significant |
| 996 | 1 | 0.6666666666666666 | 0.38989071038251366 | -7.027749672525287 | 0.005918864007946058 | significant |
| 996 | 2 | 0.4067796610169492 | 0.3712570621468927 | -6.263567998756802 | 0.008213404490252622 | significant |
| 997 | 1 | 0.6 | 0.4684426229508197 | -3.0715294425556663 | 0.05449687138415423 | not significant |
| 997 | 2 | 0.5254237288135594 | 0.4639830508474576 | -3.2766095606229877 | 0.04654453906356892 | significant |
| 998 | 1 | 0.55 | 0.3942622950819672 | -5.155433917254585 | 0.01415042791070789 | significant |
| 998 | 2 | 0.3728813559322034 | 0.3123587570621469 | -5.08813713983205 | 0.01467150017877107 | significant |
| 999 | 1 | 0.5166666666666667 | 0.4093806921675774 | -2.1811187398383804 | 0.16093929016287928 | not significant |
| 999 | 2 | 0.48333333333333334 | 0.3728813559322034 | -3.5693253330753327 | 0.03757007584643612 | significant |
| 1000 | 1 | 0.5666666666666667 | 0.4518442622950819 | -2.7116536407871275 | 0.07306231680161718 | not significant |
| 1000 | 2 | 0.45 | 0.364406779661017 | -5.27455297545764 | 0.01328581900289993 | significant |
| 1001 | 1 | 0.5333333333333333 | 0.38995901639344266 | -4.717870559277373 | 0.01803430740365783 | significant |
| 1001 | 2 | 0.4406779661016949 | 0.35451977401129947 | -6.7777777777777715 | 0.006564177674076621 | significant |
| 1002 | 1 | 0.5333333333333333 | 0.33572404371584696 | -5.92922902548398 | 0.009587374714266027 | significant |
| 1002 | 2 | 0.4 | 0.27118644067796605 | -2.510197384488104 | 0.0869224831963583 | not significant |
| 1003 | 1 | 0.7540983606557377 | 0.31666666666666665 | -8.09966109145276 | 0.0039331240116504485 | significant |
| 1003 | 2 | 0.4067796610169492 | 0.30338983050847457 | -5.263055373168918 | 0.034257089055649254 | significant |
| 1004 | 1 | 0.5 | 0.3855191256830601 | -2.7763365971020724 | 0.06920166849483687 | not significant |
| 1004 | 2 | 0.3728813559322034 | 0.3077683615819209 | -1.8092905922289133 | 0.16810978065452678 | not significant |
| 1005 | 1 | 0.7049180327868853 | 0.35833333333333334 | -13.376800824017138 | 0.0009031187936782386 | significant |
| 1005 | 2 | 0.423728813559322 | 0.33742937853107347 | -2.129023252677582 | 0.12312500629142373 | not significant |
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7049180327868853\n", + "Average of Other Ratios: 0.35833333333333334\n", + "T-Statistic: -13.376800824017138\n", + "P-Value: 0.0009031187936782386\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.33742937853107347\n", + "T-Statistic: -2.129023252677582\n", + "P-Value: 0.12312500629142373\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1005\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1006\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.3734972677595629\n", + "T-Statistic: -2.323557226050761\n", + "P-Value: 0.10274990807505914\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3559322033898305\n", + "Average of Other Ratios: 0.3163841807909604\n", + "T-Statistic: -4.041451884327386\n", + "P-Value: 0.02726185440936123\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1006\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1007\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.4319672131147541\n", + "T-Statistic: -3.959212335659781\n", + "P-Value: 0.028768976615567175\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.4301553672316384\n", + "T-Statistic: -2.476997933163102\n", + "P-Value: 0.08950755799392057\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1007\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1008\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.3937841530054645\n", + "T-Statistic: -3.3540166875307085\n", + "P-Value: 0.04392773109730455\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.43333333333333335\n", + "Average of Other Ratios: 0.3305084745762712\n", + "T-Statistic: -3.9024896268095732\n", + "P-Value: 0.029871358819325514\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1008\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1009\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6666666666666666\n", + "Average of Other Ratios: 0.3860655737704918\n", + "T-Statistic: -6.951550947054901\n", + "P-Value: 0.006106360263638599\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.36687853107344637\n", + "T-Statistic: -2.115310434853906\n", + "P-Value: 0.12473633813156884\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1009\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1010\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7704918032786885\n", + "Average of Other Ratios: 0.3875\n", + "T-Statistic: -11.40102880068347\n", + "P-Value: 0.0014479028367797866\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.40487288135593225\n", + "T-Statistic: -4.138026787575419\n", + "P-Value: 0.025619300883706696\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1010\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1011\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6333333333333333\n", + "Average of Other Ratios: 0.43572404371584705\n", + "T-Statistic: -11.348123445291295\n", + "P-Value: 0.0014678760475146785\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.3541431261770245\n", + "T-Statistic: -2.568187024807373\n", + "P-Value: 0.1240305945387\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1011\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1012\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7213114754098361\n", + "Average of Other Ratios: 0.2875\n", + "T-Statistic: -27.500378882331333\n", + "P-Value: 0.00010553371875927762\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3728813559322034\n", + "Average of Other Ratios: 0.29103107344632767\n", + "T-Statistic: -3.7674177162295606\n", + "P-Value: 0.03272349692653532\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1012\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1013\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.30683060109289617\n", + "T-Statistic: -10.923824320280426\n", + "P-Value: 0.0016420917950501913\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3220338983050847\n", + "Average of Other Ratios: 0.26151129943502827\n", + "T-Statistic: -3.8711758641214336\n", + "P-Value: 0.03050322842853845\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1013\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1014\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5409836065573771\n", + "Average of Other Ratios: 0.3125\n", + "T-Statistic: -3.893753295658278\n", + "P-Value: 0.03004594161105454\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3389830508474576\n", + "Average of Other Ratios: 0.2489406779661017\n", + "T-Statistic: -6.249324287797364\n", + "P-Value: 0.008266464021748235\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1014\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1015\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6666666666666666\n", + "Average of Other Ratios: 0.3523907103825137\n", + "T-Statistic: -6.8536427559842705\n", + "P-Value: 0.006358933335598355\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.3287429378531073\n", + "T-Statistic: -1.9757525677542294\n", + "P-Value: 0.1426456503116112\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1015\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1016\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.4937158469945355\n", + "T-Statistic: -4.691276871564279\n", + "P-Value: 0.018312849057309264\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.46002824858757063\n", + "T-Statistic: -3.229172176597325\n", + "P-Value: 0.048246204514222006\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1016\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1017\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6557377049180327\n", + "Average of Other Ratios: 0.33333333333333337\n", + "T-Statistic: -5.788824671837737\n", + "P-Value: 0.010254290938715958\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.27019774011299436\n", + "T-Statistic: -5.5858638786774515\n", + "P-Value: 0.011330025903708709\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1017\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1018\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.3273224043715847\n", + "T-Statistic: -5.436414841751896\n", + "P-Value: 0.012218244055546734\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3220338983050847\n", + "Average of Other Ratios: 0.26596045197740115\n", + "T-Statistic: -3.6973851037027248\n", + "P-Value: 0.03434015761009596\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1018\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1019\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
[Condensed notebook stream output. For each document below, the notebook loads `bert-base-uncased` into `BertForMaskedLM` twice; every load prints the standard Hugging Face notice that the checkpoint's pooler and `cls.seq_relationship` weights are unused for masked-LM initialization (expected when initializing from a checkpoint trained on another task). Those repeated notices and the "Done N / Doing N" banner lines are elided here; the per-run detection results, rounded to three decimals, are:

| Document | Run | Highest match ratio | Avg. of other ratios | T-statistic | P-value | Significant (p < 0.05) |
|----------|-----|---------------------|----------------------|-------------|---------|------------------------|
| 1019 | 1 | 0.492 | 0.333 | -3.362 | 0.044 | yes |
| 1019 | 2 | 0.373 | 0.274 | -4.160 | 0.025 | yes |
| 1020 | 1 | 0.525 | 0.379 | -2.953 | 0.060 | no |
| 1020 | 2 | 0.367 | 0.335 | -1.990 | 0.141 | no |
| 1021 | 1 | 0.574 | 0.433 | -4.128 | 0.026 | yes |
| 1021 | 2 | 0.407 | 0.354 | -2.929 | 0.061 | no |
| 1022 | 1 | 0.517 | 0.369 | -8.104 | 0.004 | yes |
| 1022 | 2 | 0.390 | 0.325 | -2.318 | 0.103 | no |
| 1023 | 1 | 0.525 | 0.367 | -4.310 | 0.023 | yes |
| 1023 | 2 | 0.424 | 0.321 | -9.741 | 0.002 | yes |
| 1024 | 1 | 0.517 | 0.365 | -3.212 | 0.049 | yes |
| 1024 | 2 | 0.390 | 0.279 | -7.111 | 0.006 | yes |
| 1025 | 1 | 0.567 | 0.390 | -3.989 | 0.028 | yes |
| 1025 | 2 | 0.441 | 0.325 | -3.389 | 0.043 | yes |
| 1026 | 1 | 0.590 | 0.346 | -4.250 | 0.024 | yes |
| 1026 | 2 | 0.390 | 0.316 | -4.476 | 0.021 | yes |
| 1027 | 1 | 0.623 | 0.325 | -6.881 | 0.006 | yes |
| 1027 | 2 | 0.305 | 0.278 | -1.909 | 0.152 | no |
| 1028 | 1 | 0.567 | 0.415 | -6.274 | 0.008 | yes |
| 1028 | 2 | 0.458 | 0.321 | -4.967 | 0.016 | yes |
| 1029 | 1 | 0.508 | 0.375 | -2.755 | 0.070 | no |
| 1029 | 2 | 0.441 | 0.363 | -8.000 | 0.004 | yes |
| 1030 | 1 | 0.492 | 0.396 | -6.743 | 0.007 | yes |
| 1030 | 2 | 0.450 | 0.377 | -3.878 | 0.030 | yes |
| 1031 | 1 | 0.574 | 0.387 | -4.020 | 0.028 | yes |
| 1031 | 2 | 0.373 | 0.331 | -4.455 | 0.047 | yes |
| 1032 | 1 | 0.550 | 0.440 | -2.925 | 0.061 | no |
| 1032 | 2 | 0.424 | 0.350 | -3.833 | 0.031 | yes |
| 1033 | 1 | 0.689 | 0.375 | -7.525 | 0.005 | yes |
| 1033 | 2 | 0.542 | 0.355 | -6.513 | 0.007 | yes |
| 1034 | 1 | 0.733 | 0.286 | -23.291 | <0.001 | yes |
| 1034 | 2 | 0.356 | 0.287 | -4.234 | 0.024 | yes |
| 1035 | 1 | 0.508 | 0.346 | -3.542 | 0.038 | yes |
| 1035 | 2 | 0.356 | 0.300 | -1.738 | 0.181 | no |
| 1036 | 1 | 0.550 | 0.361 | -6.160 | 0.009 | yes |
| 1036 | 2 | 0.383 | 0.288 | -2.648 | 0.077 | no |
| 1037 | 1 | 0.500 | 0.440 | -2.283 | 0.107 | no |
| 1037 | 2 | 0.492 | 0.355 | -7.022 | 0.006 | yes |
| 1038 | 1 | 0.550 | 0.420 | -18.637 | 0.003 | yes |
| 1038 | 2 | 0.424 | 0.363 | -2.200 | 0.115 | no |
| 1039 | 1 | 0.583 | 0.427 | -5.674 | 0.011 | yes |
| 1039 | 2 | 0.441 | 0.354 | -4.143 | 0.026 | yes |

Processing of document 1040 then begins; the log continues in the same pattern.]
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.44405737704918036\n", + "T-Statistic: -3.2047054428325206\n", + "P-Value: 0.04915465698343356\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.3882062146892655\n", + "T-Statistic: -2.937104220060188\n", + "P-Value: 0.060651717076612384\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1040\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1041\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7377049180327869\n", + "Average of Other Ratios: 0.3625\n", + "T-Statistic: -11.657726867446977\n", + "P-Value: 0.0013559475300634065\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3415960451977401\n", + "T-Statistic: -3.9152539744439387\n", + "P-Value: 0.029618618166276877\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1041\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1042\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.36919398907103823\n", + "T-Statistic: -6.079597077972364\n", + "P-Value: 0.008934801065512471\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.3203389830508475\n", + "T-Statistic: -4.2850226450669\n", + "P-Value: 0.023356119365012706\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1042\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1043\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7704918032786885\n", + "Average of Other Ratios: 0.3416666666666666\n", + "T-Statistic: -39.859982700803556\n", + "P-Value: 3.47437157745963e-05\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.34173728813559323\n", + "T-Statistic: -5.289174189736959\n", + "P-Value: 0.013184494475533716\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1043\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1044\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.33749999999999997\n", + "T-Statistic: -9.2248243559719\n", + "P-Value: 0.0026947741822498272\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.29103107344632767\n", + "T-Statistic: -3.585517945697556\n", + "P-Value: 0.03714082383617849\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1044\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1045\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7\n", + "Average of Other Ratios: 0.4937158469945355\n", + "T-Statistic: -8.461896046771958\n", + "P-Value: 0.0034645967635944996\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.559322033898305\n", + "Average of Other Ratios: 0.48128531073446335\n", + "T-Statistic: -3.3967856291561485\n", + "P-Value: 0.04256178912971218\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1045\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1046\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6333333333333333\n", + "Average of Other Ratios: 0.33551912568306014\n", + "T-Statistic: -6.233747163298384\n", + "P-Value: 0.008325007314823952\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.3377824858757062\n", + "T-Statistic: -3.3339462897065575\n", + "P-Value: 0.04458791920005854\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1046\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1047\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5409836065573771\n", + "Average of Other Ratios: 0.36249999999999993\n", + "T-Statistic: -12.541115855481232\n", + "P-Value: 0.0010929740797637178\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.34745762711864403\n", + "T-Statistic: -3.119513763428299\n", + "P-Value: 0.052491375678345144\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1047\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1048\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5901639344262295\n", + "Average of Other Ratios: 0.39166666666666666\n", + "T-Statistic: -3.4744562728924717\n", + "P-Value: 0.040216306514409425\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.3121468926553672\n", + "T-Statistic: -9.141660034508423\n", + "P-Value: 0.00276691986404339\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1048\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1049\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.40204918032786885\n", + "T-Statistic: -2.291468930292345\n", + "P-Value: 0.10581353287285375\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.38333333333333336\n", + "Average of Other Ratios: 0.3305084745762712\n", + "T-Statistic: -10.796450033846\n", + "P-Value: 0.0016997103571064131\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1049\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1050\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6229508196721312\n", + "Average of Other Ratios: 0.42500000000000004\n", + "T-Statistic: -3.9774734463808428\n", + "P-Value: 0.028425239599828966\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.3672316384180791\n", + "T-Statistic: -2.274140974459845\n", + "P-Value: 0.10751386463711397\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1050\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1051\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.41898907103825134\n", + "T-Statistic: -7.392993166333208\n", + "P-Value: 0.005118225808922169\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.4007768361581921\n", + "T-Statistic: -6.242283371885267\n", + "P-Value: 0.008292858606378618\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1051\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1052\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.48333333333333334\n", + "Average of Other Ratios: 0.42732240437158475\n", + "T-Statistic: -2.616902813780725\n", + "P-Value: 0.07921236839961415\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.37711864406779666\n", + "T-Statistic: -8.982407047314\n", + "P-Value: 0.0029123702976661856\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1052\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1053\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
[Condensed notebook output, samples 1053–1059. For every sample the notebook loads bert-base-uncased into BertForMaskedLM several times, and each load prints the standard Transformers notice that the checkpoint's pooler and cls.seq_relationship (next-sentence-prediction) weights are not used, together with the "This IS expected / This IS NOT expected" explanation; the verbatim notice is omitted here, and a sketch of the load that triggers it follows the listing below. Each sample then reports two detection results in the form "highest match ratio vs. average of the other ratios (t-statistic, p-value, verdict at p < 0.05)":]

Sample 1053: 0.567 vs 0.323 (t = -6.79, p = 0.0065, significant); 0.433 vs 0.288 (t = -5.09, p = 0.0147, significant)
Sample 1054: 0.600 vs 0.361 (t = -6.36, p = 0.0079, significant); 0.424 vs 0.312 (t = -3.43, p = 0.0414, significant)
Sample 1055: 0.700 vs 0.328 (t = -15.38, p = 0.0006, significant); 0.407 vs 0.264 (t = -11.82, p = 0.0071, significant)
Sample 1056: 0.600 vs 0.448 (t = -4.88, p = 0.0165, significant); 0.492 vs 0.401 (t = -8.58, p = 0.0033, significant)
Sample 1057: 0.417 vs 0.381 (t = -3.10, p = 0.0902, not significant); 0.407 vs 0.312 (t = -4.82, p = 0.0170, significant)
Sample 1058: 0.533 vs 0.365 (t = -4.98, p = 0.0156, significant); 0.424 vs 0.308 (t = -10.06, p = 0.0021, significant)
Sample 1059: 0.550 vs 0.385 (t = -2.58, p = 0.0821, not significant); 0.390 vs 0.326 (t = -3.13, p = 0.0888, not significant)
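The notice summarized above is a side effect of loading the bert-base-uncased checkpoint into a masked-language-modelling head. The following is a minimal sketch of such a load, inferred from the warning text rather than copied from the author's notebook cell:

```python
from transformers import BertForMaskedLM, BertTokenizer

# Loading bert-base-uncased into BertForMaskedLM discards the checkpoint's
# pooler and next-sentence-prediction (cls.seq_relationship) weights; that is
# exactly what the "Some weights of the model checkpoint ... were not used"
# notice reports, and it is expected and harmless for masked-token prediction.
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertForMaskedLM.from_pretrained("bert-base-uncased")
model.eval()  # the log implies inference only, no fine-tuning
```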
[Condensed notebook output, samples 1060–1073, in the same format as above:]

Sample 1060: 0.639 vs 0.400 (t = -5.95, p = 0.0095, significant); 0.492 vs 0.299 (t = -5.28, p = 0.0133, significant)
Sample 1061: 0.683 vs 0.377 (t = -5.80, p = 0.0102, significant); 0.407 vs 0.292 (t = -2.79, p = 0.0683, not significant)
Sample 1062: 0.459 vs 0.375 (t = -2.87, p = 0.0640, not significant); 0.407 vs 0.292 (t = -3.54, p = 0.0715, not significant)
Sample 1063: 0.600 vs 0.431 (t = -3.24, p = 0.0478, significant); 0.458 vs 0.380 (t = -2.69, p = 0.0742, not significant)
Sample 1064: 0.600 vs 0.369 (t = -6.85, p = 0.0064, significant); 0.424 vs 0.333 (t = -3.67, p = 0.0350, significant)
Sample 1065: 0.517 vs 0.369 (t = -7.11, p = 0.0057, significant); 0.390 vs 0.300 (t = -3.17, p = 0.0504, not significant)
Sample 1066: 0.639 vs 0.400 (t = -11.12, p = 0.0016, significant); 0.542 vs 0.350 (t = -8.80, p = 0.0031, significant)
Sample 1067: 0.656 vs 0.417 (t = -2.86, p = 0.0646, not significant); 0.467 vs 0.364 (t = -3.64, p = 0.0358, significant)
Sample 1068: 0.617 vs 0.415 (t = -8.02, p = 0.0041, significant); 0.450 vs 0.394 (t = -2.64, p = 0.0777, not significant)
Sample 1069: 0.541 vs 0.371 (t = -3.84, p = 0.0311, significant); 0.424 vs 0.346 (t = -4.70, p = 0.0182, significant)
Sample 1070: 0.667 vs 0.353 (t = -8.96, p = 0.0029, significant); 0.407 vs 0.316 (t = -9.24, p = 0.0027, significant)
Sample 1071: 0.683 vs 0.373 (t = -6.27, p = 0.0082, significant); 0.441 vs 0.320 (t = -6.16, p = 0.0086, significant)
Sample 1072: 0.508 vs 0.342 (t = -3.85, p = 0.0310, significant); 0.450 vs 0.284 (t = -7.45, p = 0.0050, significant)
Sample 1073: 0.483 vs 0.344 (t = -6.82, p = 0.0064, significant); 0.390 vs 0.270 (t = -5.68, p = 0.0108, significant)

[Output for sample 1074 then begins with the same pair of model-loading notices and continues below; a sketch of how the per-sample statistics above could be reproduced follows.]
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5245901639344263\n", + "Average of Other Ratios: 0.4083333333333333\n", + "T-Statistic: -2.753689634807033\n", + "P-Value: 0.0705238707366333\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.3165960451977401\n", + "T-Statistic: -4.368550947552518\n", + "P-Value: 0.022184572544257958\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1074\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1075\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.3471766848816029\n", + "T-Statistic: -2.090326535903569\n", + "P-Value: 0.1717476912950698\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.32055084745762713\n", + "T-Statistic: -6.009400282160132\n", + "P-Value: 0.009231944143766397\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1075\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1076\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.639344262295082\n", + "Average of Other Ratios: 0.33333333333333337\n", + "T-Statistic: -6.424891128611615\n", + "P-Value: 0.007642542451366619\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3728813559322034\n", + "Average of Other Ratios: 0.2641242937853107\n", + "T-Statistic: -3.6019080768824088\n", + "P-Value: 0.06917625132966021\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1076\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1077\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.37745901639344265\n", + "T-Statistic: -5.307134884333113\n", + "P-Value: 0.013061404240156596\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.3248587570621469\n", + "T-Statistic: -6.653056282246297\n", + "P-Value: 0.006920964466083356\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1077\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1078\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.38558743169398907\n", + "T-Statistic: -3.5605909993218026\n", + "P-Value: 0.037804245228875336\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.34173728813559323\n", + "T-Statistic: -2.5913248074535873\n", + "P-Value: 0.08098113762773655\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1078\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1079\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7704918032786885\n", + "Average of Other Ratios: 0.30833333333333335\n", + "T-Statistic: -13.194558267956465\n", + "P-Value: 0.0009405413630643912\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.35451977401129947\n", + "T-Statistic: -2.4666666666666663\n", + "P-Value: 0.09033115673485469\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1079\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1080\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7540983606557377\n", + "Average of Other Ratios: 0.3625\n", + "T-Statistic: -11.206538990050577\n", + "P-Value: 0.001523158238254348\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.3712570621468927\n", + "T-Statistic: -2.1556241033270016\n", + "P-Value: 0.12006957075894573\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1080\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1081\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.47540983606557374\n", + "Average of Other Ratios: 0.3416666666666666\n", + "T-Statistic: -2.7938036914857083\n", + "P-Value: 0.06820289228132356\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.3207627118644068\n", + "T-Statistic: -3.6925370142833875\n", + "P-Value: 0.03445581989413703\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1081\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1082\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5666666666666667\n", + "Average of Other Ratios: 0.3652322404371584\n", + "T-Statistic: -6.1214946474517395\n", + "P-Value: 0.008763411957595306\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.3080508474576271\n", + "T-Statistic: -6.565459858828986\n", + "P-Value: 0.007186932185349032\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1082\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1083\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6721311475409836\n", + "Average of Other Ratios: 0.38333333333333336\n", + "T-Statistic: -8.021244634914645\n", + "P-Value: 0.004045425154564468\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.34625706214689267\n", + "T-Statistic: -2.534027979956117\n", + "P-Value: 0.08512323508130125\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1083\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1084\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.423224043715847\n", + "T-Statistic: -3.689180746726386\n", + "P-Value: 0.03453618347086469\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.33340395480225987\n", + "T-Statistic: -6.185153671923991\n", + "P-Value: 0.008511170916897891\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1084\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1085\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5737704918032787\n", + "Average of Other Ratios: 0.42500000000000004\n", + "T-Statistic: -3.2776582466721216\n", + "P-Value: 0.04650778639185666\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.35861581920903957\n", + "T-Statistic: -6.26717557251908\n", + "P-Value: 0.008200036888372677\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1085\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1086\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5081967213114754\n", + "Average of Other Ratios: 0.3666666666666667\n", + "T-Statistic: -2.5040961733848097\n", + "P-Value: 0.08739061882540976\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.291454802259887\n", + "T-Statistic: -4.773212266552967\n", + "P-Value: 0.01747207617796873\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1086\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1087\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + },

Per-sample match-ratio significance checks for samples 1087-1107. Each sample is checked twice; every check reloads bert-base-uncased as BertForMaskedLM (printing the standard warning that the pooler and seq_relationship weights were not used) and then reports the highest match ratio, the average of the other ratios, and a t-test at the p < 0.05 level:

sample  check  highest ratio  avg. others  t-stat   p-value   significant
1087    1      0.7869         0.3667       -10.75   0.0017    yes
1087    2      0.4746         0.3628        -4.48   0.0207    yes
1088    1      0.5410         0.4583        -9.92   0.0022    yes
1088    2      0.5424         0.3710        -5.43   0.0123    yes
1089    1      0.7541         0.3083       -27.94   0.0001    yes
1089    2      0.3220         0.2976        -1.79   0.2158    no
1090    1      0.5333         0.3563        -3.02   0.0569    no
1090    2      0.4407         0.3290        -7.50   0.0049    yes
1091    1      0.5246         0.4000        -2.79   0.0683    no
1091    2      0.4746         0.3417        -9.41   0.0025    yes
1092    1      0.5000         0.3188        -3.09   0.0536    no
1092    2      0.2712         0.2277        -2.85   0.0650    no
1093    1      0.6066         0.3583        -5.03   0.0151    yes
1093    2      0.3833         0.2966        -4.07   0.0268    yes
1094    1      0.7377         0.3417        -6.61   0.0070    yes
1094    2      0.4576         0.3207        -5.48   0.0119    yes
1095    1      0.4833         0.3693        -3.83   0.0313    yes
1095    2      0.3729         0.3081        -3.68   0.0348    yes
1096    1      0.5902         0.3500        -4.45   0.0212    yes
1096    2      0.3898         0.2910        -3.16   0.0510    no
1097    1      0.5667         0.4068        -5.62   0.0111    yes
1097    2      0.4237         0.3585        -3.75   0.0332    yes
1098    1      0.5833         0.3768        -3.37   0.0435    yes
1098    2      0.4746         0.3586       -15.62   0.0006    yes
1099    1      0.4918         0.3625        -3.57   0.0376    yes
1099    2      0.3898         0.2790        -2.95   0.0602    no
1100    1      0.6833         0.3360       -19.59   0.0003    yes
1100    2      0.4068         0.3249        -3.11   0.0530    no
1101    1      0.7500         0.3402       -98.70   2.3e-06   yes
1101    2      0.4237         0.3544        -4.98   0.0156    yes
1102    1      0.4333         0.3476        -2.16   0.1628    no
1102    2      0.3729         0.3144        -1.51   0.2690    no
1103    1      0.7541         0.4083       -13.35   0.0009    yes
1103    2      0.4576         0.4177        -3.77   0.0326    yes
1104    1      0.5833         0.3486        -4.55   0.0198    yes
1104    2      0.3051         0.2533        -2.57   0.0822    no
1105    1      0.6721         0.3250       -10.31   0.0019    yes
1105    2      0.3220         0.2942        -2.56   0.2372    no
1106    1      0.4426         0.3417        -9.38   0.0026    yes
1106    2      0.4915         0.2739        -7.92   0.0042    yes
1107    1      0.5333         0.3570        -5.48   0.0119    yes
1107    2      0.3559         0.3250        -3.35   0.0442    yes
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.4395491803278689\n", + "T-Statistic: -6.233752412361234\n", + "P-Value: 0.00832498749577228\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.40918079096045196\n", + "T-Statistic: -3.9630593303183272\n", + "P-Value: 0.028696120256434428\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1108\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1109\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.33613387978142073\n", + "T-Statistic: -5.360731285926958\n", + "P-Value: 0.012702901609225676\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.2911016949152542\n", + "T-Statistic: -3.4316055779260313\n", + "P-Value: 0.0414893603948339\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1109\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1110\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.4563524590163935\n", + "T-Statistic: -3.2403169660324136\n", + "P-Value: 0.04783943753394908\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.559322033898305\n", + "Average of Other Ratios: 0.400635593220339\n", + "T-Statistic: -8.675158396579372\n", + "P-Value: 0.0032228830687179013\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1110\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1111\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7049180327868853\n", + "Average of Other Ratios: 0.3875\n", + "T-Statistic: -20.121911614894287\n", + "P-Value: 0.0002682965121865791\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.3427495291902072\n", + "T-Statistic: -3.250494467879352\n", + "P-Value: 0.08302849336823244\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1111\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1112\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5901639344262295\n", + "Average of Other Ratios: 0.4083333333333333\n", + "T-Statistic: -9.166095697580069\n", + "P-Value: 0.002745459142872881\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.38008474576271184\n", + "T-Statistic: -3.4264466597407415\n", + "P-Value: 0.04164606550795528\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1112\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1113\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6721311475409836\n", + "Average of Other Ratios: 0.35833333333333334\n", + "T-Statistic: -5.742446164819555\n", + "P-Value: 0.010487900478810956\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.320409604519774\n", + "T-Statistic: -6.489451756285192\n", + "P-Value: 0.007428710958179485\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1113\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1114\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6721311475409836\n", + "Average of Other Ratios: 0.3666666666666667\n", + "T-Statistic: -7.815029537231881\n", + "P-Value: 0.004361685111408271\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.36278248587570616\n", + "T-Statistic: -4.518646959031556\n", + "P-Value: 0.02026382990059579\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1114\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1115\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6229508196721312\n", + "Average of Other Ratios: 0.44999999999999996\n", + "T-Statistic: -3.1053610315875337\n", + "P-Value: 0.05307307725713686\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.559322033898305\n", + "Average of Other Ratios: 0.38806497175141247\n", + "T-Statistic: -9.914490233494313\n", + "P-Value: 0.002182622087591197\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1115\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1116\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.36454918032786887\n", + "T-Statistic: -3.2455626872934524\n", + "P-Value: 0.04764947889276525\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.38333333333333336\n", + "Average of Other Ratios: 0.3135593220338983\n", + "T-Statistic: -6.377512576754887\n", + "P-Value: 0.007804638131363213\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1116\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1117\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.37356557377049177\n", + "T-Statistic: -3.757035055298464\n", + "P-Value: 0.032956925528189414\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3559322033898305\n", + "Average of Other Ratios: 0.2616525423728814\n", + "T-Statistic: -8.372415728614014\n", + "P-Value: 0.0035731576383694914\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1117\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1118\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.639344262295082\n", + "Average of Other Ratios: 0.39999999999999997\n", + "T-Statistic: -5.493611809608042\n", + "P-Value: 0.011867900939411315\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.3500706214689266\n", + "T-Statistic: -2.6979742584262723\n", + "P-Value: 0.07391274673643934\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1118\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1119\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.3897540983606558\n", + "T-Statistic: -7.467934583305662\n", + "P-Value: 0.004971898730909863\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.3686440677966102\n", + "T-Statistic: -7.629306376694646\n", + "P-Value: 0.004675053414430021\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1119\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1120\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.42342896174863387\n", + "T-Statistic: -4.800324631602227\n", + "P-Value: 0.01720494304786845\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.3728813559322034\n", + "T-Statistic: -2.1448905695991947\n", + "P-Value: 0.12129141727935283\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1120\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1121\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + },
+ The identical bert-base-uncased weight-initialization warning shown above is emitted to stderr before every masked-LM run; only the stdout detection results are kept below. For documents 1121-1141 each document logs two tests, reported as Highest Match Ratio / Average of Other Ratios / T-Statistic / P-Value / verdict:
+ 1121: 0.5737704918032787 / 0.33749999999999997 / -14.977768942526218 / 0.0006459569455958266 / significant; 0.4406779661016949 / 0.3162429378531073 / -3.2177938896146965 / 0.04866601725501004 / significant
+ 1122: 0.5166666666666667 / 0.29849726775956287 / -4.379387604175447 / 0.022038198857139507 / significant; 0.4406779661016949 / 0.2699152542372881 / -6.495776752201798 / 0.007408187085051301 / significant
+ 1123: 0.4918032786885246 / 0.39166666666666666 / -3.42163677485204 / 0.041792847188906634 / significant; 0.45 / 0.3432203389830508 / -3.505866051946874 / 0.039314608332933376 / significant
+ 1124: 0.7213114754098361 / 0.35833333333333334 / -16.463141062057648 / 0.0004877463789961118 / significant; 0.423728813559322 / 0.3168785310734463 / -3.110976497704088 / 0.052841304640431436 / not significant
+ 1125: 0.6833333333333333 / 0.3688524590163934 / -7.8974364585461565 / 0.00423151561991115 / significant; 0.4576271186440678 / 0.3041666666666667 / -4.0423517251725425 / 0.027245933023793766 / significant
+ 1126: 0.5901639344262295 / 0.37083333333333335 / -3.8425267583017693 / 0.03109643993412762 / significant; 0.3898305084745763 / 0.32627118644067793 / -1.6531163063339527 / 0.24012067051362174 / not significant
+ 1127: 0.6065573770491803 / 0.3916666666666667 / -4.682079879777176 / 0.01841047771126608 / significant; 0.3898305084745763 / 0.325894538606403 / -9.506969760375863 / 0.01088379244125519 / significant
+ 1128: 0.6333333333333333 / 0.4646174863387978 / -5.6726426012368485 / 0.010852821078198268 / significant; 0.4406779661016949 / 0.3924435028248588 / -4.386052107630403 / 0.021948799914748462 / significant
+ 1129: 0.4426229508196721 / 0.31666666666666665 / -3.324802624172649 / 0.04489286075866494 / significant; 0.3050847457627119 / 0.2489406779661017 / -3.2234880893826983 / 0.04845534939455295 / significant
+ 1130: 0.5409836065573771 / 0.31666666666666665 / -5.419860209810303 / 0.012322162365242495 / significant; 0.36666666666666664 / 0.2966101694915254 / -6.403332465729597 / 0.00771574670699146 / significant
+ 1131: 0.5833333333333334 / 0.36045081967213116 / -4.440891852865153 / 0.02123070084724967 / significant; 0.4067796610169492 / 0.3416666666666667 / -4.837646025991297 / 0.016845843964025876 / significant
+ 1132: 0.6166666666666667 / 0.3941256830601093 / -3.223851145752677 / 0.04844195656899026 / significant; 0.4576271186440678 / 0.3288841807909605 / -6.129713906579408 / 0.0087302972100172 / significant
+ 1133: 0.6065573770491803 / 0.45833333333333337 / -6.722810708442813 / 0.00671836273975336 / significant; 0.4915254237288136 / 0.3926553672316384 / -3.4156502553198673 / 0.04197645602545185 / significant
+ 1134: 0.55 / 0.41461748633879786 / -4.609601498177781 / 0.01920401476029972 / significant; 0.45 / 0.3771186440677966 / -3.6395833778303657 / 0.03575220047050311 / significant
+ 1135: 0.45 / 0.3482923497267759 / -3.935890584039202 / 0.029215786175393728 / significant; 0.3898305084745763 / 0.24879943502824858 / -5.13286206856859 / 0.014322469145621968 / significant
+ 1136: 0.6721311475409836 / 0.35 / -14.97130283665096 / 0.0006467854175943383 / significant; 0.3559322033898305 / 0.25838041431261766 / -2.478383793597629 / 0.13145422697640002 / not significant
+ 1137: 0.7540983606557377 / 0.3541666666666667 / -16.708601239682135 / 0.00046674362746457226 / significant; 0.4067796610169492 / 0.3209745762711865 / -3.005502067896227 / 0.05741679214585345 / not significant
+ 1138: 0.5409836065573771 / 0.37916666666666665 / -2.2740071535885997 / 0.10752712396614864 / not significant; 0.3728813559322034 / 0.29103107344632767 / -3.1775375076563694 / 0.05018888898927705 / not significant
+ 1139: 0.6333333333333333 / 0.43907103825136606 / -2.820281518337947 / 0.06672285377972131 / not significant; 0.4406779661016949 / 0.38411016949152543 / -2.4611692476881775 / 0.09077318658872167 / not significant
+ 1140: 0.6885245901639344 / 0.3625 / -3.419275812902317 / 0.04186513660401589 / significant; 0.4067796610169492 / 0.3458333333333333 / -4.644284279214491 / 0.018818859918736594 / significant
+ 1141: 0.6885245901639344 / 0.35833333333333334 / -6.456069857786826 / 0.007538278964860644 / significant; 0.423728813559322 / 0.32055084745762713 / -4.426845916974451 / 0.021411691624411183 / significant
+ Done 1141. Doing 1142: the stderr log again opens with the same warning: "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5081967213114754\n", + "Average of Other Ratios: 0.4291666666666667\n", + "T-Statistic: -1.8998904386836808\n", + "P-Value: 0.15364813434857005\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.37153954802259886\n", + "T-Statistic: -3.1115472159109827\n", + "P-Value: 0.052817820285826836\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1142\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1143\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5901639344262295\n", + "Average of Other Ratios: 0.44999999999999996\n", + "T-Statistic: -3.585966147227154\n", + "P-Value: 0.03712903167578977\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.4094632768361582\n", + "T-Statistic: -3.037748906538044\n", + "P-Value: 0.055966806959142974\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1143\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1144\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.4291666666666667\n", + "T-Statistic: -4.10299965843394\n", + "P-Value: 0.02619990938534542\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.40494350282485875\n", + "T-Statistic: -3.713511479606386\n", + "P-Value: 0.03395898358756934\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1144\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1145\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5737704918032787\n", + "Average of Other Ratios: 0.37916666666666665\n", + "T-Statistic: -5.466397186258622\n", + "P-Value: 0.01203293462437204\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.36864406779661013\n", + "T-Statistic: -5.628616053819864\n", + "P-Value: 0.011091567639778466\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1145\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1146\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.48333333333333334\n", + "Average of Other Ratios: 0.3855191256830601\n", + "T-Statistic: -3.236738871470941\n", + "P-Value: 0.047969557784140165\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3547316384180791\n", + "T-Statistic: -2.9144455089837993\n", + "P-Value: 0.061773504814927487\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1146\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1147\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6666666666666666\n", + "Average of Other Ratios: 0.38982240437158466\n", + "T-Statistic: -7.304853191112693\n", + "P-Value: 0.005297682541612248\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3204802259887006\n", + "T-Statistic: -7.80733944954129\n", + "P-Value: 0.0043741006479608106\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1147\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1148\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.8360655737704918\n", + "Average of Other Ratios: 0.34583333333333327\n", + "T-Statistic: -17.804840632796406\n", + "P-Value: 0.00038631936941714036\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.3813559322033898\n", + "T-Statistic: -3.1047293380092054\n", + "P-Value: 0.05309922984140076\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1148\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1149\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5901639344262295\n", + "Average of Other Ratios: 0.3708333333333333\n", + "T-Statistic: -5.272729644436734\n", + "P-Value: 0.01329852584746764\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.35444915254237286\n", + "T-Statistic: -2.8491361177963865\n", + "P-Value: 0.06515525258058204\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1149\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1150\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.3483606557377049\n", + "T-Statistic: -4.11223200027819\n", + "P-Value: 0.026045241625233456\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.26560734463276836\n", + "T-Statistic: -6.047006896155811\n", + "P-Value: 0.009071170018542232\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1150\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1151\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4918032786885246\n", + "Average of Other Ratios: 0.38750000000000007\n", + "T-Statistic: -4.357648863022137\n", + "P-Value: 0.022333101933145045\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.35000000000000003\n", + "T-Statistic: -2.2152876697379726\n", + "P-Value: 0.11354046971526367\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1151\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1152\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.639344262295082\n", + "Average of Other Ratios: 0.32916666666666666\n", + "T-Statistic: -6.767511177347243\n", + "P-Value: 0.006592619506710128\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.3347457627118644\n", + "T-Statistic: -4.554467869037332\n", + "P-Value: 0.019837599235630846\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1152\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1153\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.4186475409836065\n", + "T-Statistic: -2.434445731879166\n", + "P-Value: 0.0929600077713279\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3728813559322034\n", + "Average of Other Ratios: 0.34180790960451973\n", + "T-Statistic: -6.35085296108589\n", + "P-Value: 0.007897838244939661\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1153\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1154\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5409836065573771\n", + "Average of Other Ratios: 0.36666666666666664\n", + "T-Statistic: -5.1238506750677635\n", + "P-Value: 0.0143919153374869\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4\n", + "Average of Other Ratios: 0.33050847457627114\n", + "T-Statistic: -2.733333333333336\n", + "P-Value: 0.07173922515901705\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1154\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1155\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.33989071038251367\n", + "T-Statistic: -4.377554579883222\n", + "P-Value: 0.02206287000445764\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.286864406779661\n", + "T-Statistic: -3.6376865077145064\n", + "P-Value: 0.03579978607225466\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1176\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1177\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.39207650273224043\n", + "T-Statistic: -6.083165423794337\n", + "P-Value: 0.025975157859805906\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.39237288135593223\n", + "T-Statistic: -3.433070549496972\n", + "P-Value: 0.04144499780826165\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1177\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1178\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7704918032786885\n", + "Average of Other Ratios: 0.3208333333333333\n", + "T-Statistic: -15.854306407643215\n", + "P-Value: 0.0005455614883016697\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.3372881355932204\n", + "T-Statistic: -3.3003043252568145\n", + "P-Value: 0.08083454343693727\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1178\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1179\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.28189890710382515\n", + "T-Statistic: -8.369667988980005\n", + "P-Value: 0.003576562016483774\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3389830508474576\n", + "Average of Other Ratios: 0.2921845574387947\n", + "T-Statistic: -3.052528420785673\n", + "P-Value: 0.09264712636043493\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1179\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1180\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.3903005464480874\n", + "T-Statistic: -6.562635068330882\n", + "P-Value: 0.0071957310516421215\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.37966101694915255\n", + "T-Statistic: -4.431293675255974\n", + "P-Value: 0.02135416398761436\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1180\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1181\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6666666666666666\n", + "Average of Other Ratios: 0.3730191256830601\n", + "T-Statistic: -6.867274869817217\n", + "P-Value: 0.006322952397605397\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.3416666666666667\n", + "T-Statistic: -3.3728798057819063\n", + "P-Value: 0.043318525364938\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1181\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1182\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.43333333333333335\n", + "Average of Other Ratios: 0.32568306010928966\n", + "T-Statistic: -4.191489361702128\n", + "P-Value: 0.052479362028526676\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3559322033898305\n", + "Average of Other Ratios: 0.295409604519774\n", + "T-Statistic: -5.351131248404091\n", + "P-Value: 0.012766161878428656\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1182\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1183\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6721311475409836\n", + "Average of Other Ratios: 0.35000000000000003\n", + "T-Statistic: -6.5653506061794005\n", + "P-Value: 0.0071872722304183664\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3373587570621469\n", + "T-Statistic: -4.004253706903789\n", + "P-Value: 0.02793063892749739\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1183\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1184\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7\n", + "Average of Other Ratios: 0.3524590163934426\n", + "T-Statistic: -10.447778102751764\n", + "P-Value: 0.0018717942236663328\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.3082627118644068\n", + "T-Statistic: -3.4319134544954104\n", + "P-Value: 0.04148003219890615\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1184\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1185\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6557377049180327\n", + "Average of Other Ratios: 0.36249999999999993\n", + "T-Statistic: -70.37704918032789\n", + "P-Value: 6.3221094755592405e-06\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.34187853107344635\n", + "T-Statistic: -4.233170395077674\n", + "P-Value: 0.024123819842227313\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1185\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1186\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.45621584699453555\n", + "T-Statistic: -2.481511075867372\n", + "P-Value: 0.0891506613942868\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.3927966101694915\n", + "T-Statistic: -2.484400110285176\n", + "P-Value: 0.08892311315035355\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1186\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1187\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5666666666666667\n", + "Average of Other Ratios: 0.4271174863387978\n", + "T-Statistic: -3.4482809420111318\n", + "P-Value: 0.040987939768970645\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.3501412429378531\n", + "T-Statistic: -6.005437958250537\n", + "P-Value: 0.009249100739188856\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1187\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1188\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5901639344262295\n", + "Average of Other Ratios: 0.3666666666666667\n", + "T-Statistic: -23.226517386743268\n", + "P-Value: 0.000174834804970807\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.3246468926553672\n", + "T-Statistic: -8.378851501317461\n", + "P-Value: 0.003565200635279848\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1188\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1189\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
[Condensed notebook output, documents 1189–1209. In the original stream every run is preceded by the same Hugging Face stderr notice ("Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight'] ... This IS expected ... This IS NOT expected ..."); those duplicated notices are omitted here. The two stdout result blocks printed per document are kept, with values rounded to four decimal places. HMR = Highest Match Ratio, AOR = Average of Other Ratios; "sig." = "The highest ratio is significantly different from the others.", "n.s." = "The highest ratio is not significantly different from the others."]

Doc 1189: HMR 0.4918, AOR 0.3958, t = -7.6776, p = 0.0046 (sig.) | HMR 0.4915, AOR 0.3754, t = -3.9253, p = 0.0294 (sig.)
Doc 1190: HMR 0.5167, AOR 0.3941, t = -3.7614, p = 0.0329 (sig.) | HMR 0.4576, AOR 0.3626, t = -4.1190, p = 0.0259 (sig.)
Doc 1191: HMR 0.6230, AOR 0.3417, t = -13.4125, p = 0.0009 (sig.) | HMR 0.4333, AOR 0.3263, t = -2.5654, p = 0.0828 (n.s.)
Doc 1192: HMR 0.5333, AOR 0.3273, t = -3.6652, p = 0.0351 (sig.) | HMR 0.2712, AOR 0.2235, t = -2.8217, p = 0.0666 (n.s.)
Doc 1193: HMR 0.6333, AOR 0.5270, t = -2.6670, p = 0.0759 (n.s.) | HMR 0.5593, AOR 0.4977, t = -2.7253, p = 0.0722 (n.s.)
Doc 1194: HMR 0.6557, AOR 0.3792, t = -22.1257, p = 0.0002 (sig.) | HMR 0.4068, AOR 0.3485, t = -3.5010, p = 0.0728 (n.s.)
Doc 1195: HMR 0.5167, AOR 0.3484, t = -2.8307, p = 0.0662 (n.s.) | HMR 0.3390, AOR 0.3038, t = -4.9990, p = 0.0154 (sig.)
Doc 1196: HMR 0.4833, AOR 0.3772, t = -2.6187, p = 0.0791 (n.s.) | HMR 0.3333, AOR 0.3051, t = -4.0825, p = 0.0265 (sig.)
Doc 1197: HMR 0.5167, AOR 0.3984, t = -3.9829, p = 0.0283 (sig.) | HMR 0.4407, AOR 0.3291, t = -4.2532, p = 0.0238 (sig.)
Doc 1198: HMR 0.4590, AOR 0.3708, t = -2.3515, p = 0.1002 (n.s.) | HMR 0.3729, AOR 0.3121, t = -3.9091, p = 0.0297 (sig.)
Doc 1199: HMR 0.5167, AOR 0.3691, t = -7.5217, p = 0.0049 (sig.) | HMR 0.3729, AOR 0.3166, t = -3.0965, p = 0.0534 (n.s.)
Doc 1200: HMR 0.6000, AOR 0.3651, t = -41.9817, p = 2.97e-05 (sig.) | HMR 0.4237, AOR 0.3545, t = -6.4902, p = 0.0074 (sig.)
Doc 1201: HMR 0.5333, AOR 0.3980, t = -3.2824, p = 0.0463 (sig.) | HMR 0.4746, AOR 0.3629, t = -2.8380, p = 0.0658 (n.s.)
Doc 1202: HMR 0.3934, AOR 0.3542, t = -3.1421, p = 0.0516 (n.s.) | HMR 0.4576, AOR 0.3164, t = -4.0032, p = 0.0279 (sig.)
Doc 1203: HMR 0.6066, AOR 0.4042, t = -11.7809, p = 0.0013 (sig.) | HMR 0.3898, AOR 0.3335, t = -2.8434, p = 0.0655 (n.s.)
Doc 1204: HMR 0.6393, AOR 0.3833, t = -8.0218, p = 0.0040 (sig.) | HMR 0.4237, AOR 0.3797, t = -4.5491, p = 0.0199 (sig.)
Doc 1205: HMR 0.6667, AOR 0.3321, t = -15.8509, p = 0.0005 (sig.) | HMR 0.3729, AOR 0.3078, t = -2.3170, p = 0.1034 (n.s.)
Doc 1206: HMR 0.4833, AOR 0.3733, t = -3.6588, p = 0.0353 (sig.) | HMR 0.4915, AOR 0.3293, t = -7.7130, p = 0.0045 (sig.)
Doc 1207: HMR 0.3667, AOR 0.2986, t = -2.4702, p = 0.0901 (n.s.) | HMR 0.3051, AOR 0.2234, t = -3.0806, p = 0.0541 (n.s.)
Doc 1208: HMR 0.7213, AOR 0.3667, t = -8.8102, p = 0.0031 (sig.) | HMR 0.3898, AOR 0.3036, t = -3.2063, p = 0.0850 (n.s.)
Doc 1209: HMR 0.4667, AOR 0.4106, t = -1.5057, p = 0.2292 (n.s.) | HMR 0.3729, AOR 0.3290, t = -4.4584, p = 0.0210 (sig.)

[The stream then prints the "Doing 1210" banner; the output for document 1210 continues beyond this hunk.]
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5409836065573771\n", + "Average of Other Ratios: 0.22083333333333333\n", + "T-Statistic: -6.909375426419404\n", + "P-Value: 0.0062135098686104855\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2542372881355932\n", + "Average of Other Ratios: 0.14752824858757063\n", + "T-Statistic: -6.30893579014757\n", + "P-Value: 0.008047357424001828\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1210\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1211\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.41509562841530057\n", + "T-Statistic: -6.219125997443245\n", + "P-Value: 0.00838045426704938\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.35021186440677965\n", + "T-Statistic: -2.551953540367511\n", + "P-Value: 0.08379991236656559\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1211\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1212\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.38995901639344266\n", + "T-Statistic: -5.020631036144959\n", + "P-Value: 0.015219613947787137\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.3418079096045198\n", + "T-Statistic: -4.389381125701737\n", + "P-Value: 0.02190431958759874\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1212\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1213\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5245901639344263\n", + "Average of Other Ratios: 0.3041666666666667\n", + "T-Statistic: -5.783528726099863\n", + "P-Value: 0.010280619668641678\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.288135593220339\n", + "Average of Other Ratios: 0.21532485875706214\n", + "T-Statistic: -4.8493624334243925\n", + "P-Value: 0.016735124561985816\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1213\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1214\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.34378415300546444\n", + "T-Statistic: -3.106070198750089\n", + "P-Value: 0.05304373654077345\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.2995056497175141\n", + "T-Statistic: -5.3217343632850165\n", + "P-Value: 0.012962453260928666\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1214\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1215\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.34433060109289615\n", + "T-Statistic: -4.489204827567853\n", + "P-Value: 0.020623088486061487\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.3432203389830508\n", + "T-Statistic: -2.896998294604734\n", + "P-Value: 0.06265501679556046\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1215\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1216\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45901639344262296\n", + "Average of Other Ratios: 0.29166666666666663\n", + "T-Statistic: -15.55542491509537\n", + "P-Value: 0.000577299561388648\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2711864406779661\n", + "Average of Other Ratios: 0.21532485875706214\n", + "T-Statistic: -3.1169524568281517\n", + "P-Value: 0.05259605209854495\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1216\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1217\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.3945355191256831\n", + "T-Statistic: -2.804910878658928\n", + "P-Value: 0.0675771021782272\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.3333333333333333\n", + "T-Statistic: -3.022438607339303\n", + "P-Value: 0.05664943058417436\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1217\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1218\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.3105191256830601\n", + "T-Statistic: -4.772347925976209\n", + "P-Value: 0.017480680770223765\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.26984463276836157\n", + "T-Statistic: -4.292077854283091\n", + "P-Value: 0.023254116236581038\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1218\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1219\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7377049180327869\n", + "Average of Other Ratios: 0.3375\n", + "T-Statistic: -19.209836065573764\n", + "P-Value: 0.0003080910681863436\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.30388418079096047\n", + "T-Statistic: -3.079787687349886\n", + "P-Value: 0.054144932145154395\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1219\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1220\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5737704918032787\n", + "Average of Other Ratios: 0.3833333333333333\n", + "T-Statistic: -4.318708814548055\n", + "P-Value: 0.022874240057608075\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3728813559322034\n", + "Average of Other Ratios: 0.32561205273069677\n", + "T-Statistic: -1.3195730536286363\n", + "P-Value: 0.31778111693939937\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1220\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1221\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.3239754098360656\n", + "T-Statistic: -5.08778567271395\n", + "P-Value: 0.014674286692950959\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3220338983050847\n", + "Average of Other Ratios: 0.2699152542372881\n", + "T-Statistic: -2.3318495798744\n", + "P-Value: 0.10197574453300426\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1221\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1222\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.3812158469945355\n", + "T-Statistic: -2.9098120190283097\n", + "P-Value: 0.06200608865891807\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.3389830508474576\n", + "T-Statistic: -3.119096328227442\n", + "P-Value: 0.05250841765245064\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1222\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1223\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.35\n", + "Average of Other Ratios: 0.2569672131147541\n", + "T-Statistic: -3.7827690734466466\n", + "P-Value: 0.03238222739381364\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3220338983050847\n", + "Average of Other Ratios: 0.1352401129943503\n", + "T-Statistic: -8.430881102204697\n", + "P-Value: 0.003501725063291807\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1223\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1224\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
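# A minimal, hypothetical sketch of the statistical check whose results are printed in the log
# above ("Highest Match Ratio", "Average of Other Ratios", "T-Statistic", "P-Value", and a
# significance verdict). It assumes the per-candidate match ratios have already been computed;
# the function name, variable names, and example values are illustrative only and not taken from
# the notebook. The consistently negative t-statistics, the 3 degrees of freedom implied by the
# p-values, and the 0.05 cut-off implied by the verdicts are all consistent with a one-sample
# t-test of the "other" ratios against the highest ratio, which is what this sketch performs.
import numpy as np
from scipy.stats import ttest_1samp

ALPHA = 0.05  # significance threshold implied by the printed verdicts

def report_significance(match_ratios):
    """Compare the highest match ratio against the remaining ratios with a one-sample t-test."""
    ratios = np.asarray(match_ratios, dtype=float)
    highest = ratios.max()
    others = np.delete(ratios, ratios.argmax())              # ratios of the non-best candidates
    t_stat, p_value = ttest_1samp(others, popmean=highest)   # t is negative when others < highest
    print(f"Highest Match Ratio: {highest}")
    print(f"Average of Other Ratios: {others.mean()}")
    print(f"T-Statistic: {t_stat}")
    print(f"P-Value: {p_value}")
    if p_value < ALPHA:
        print("The highest ratio is significantly different from the others.")
    else:
        print("The highest ratio is not significantly different from the others.")

# Example with placeholder ratios (e.g. five candidate texts, one of which carries the watermark):
report_significance([0.48, 0.37, 0.39, 0.35, 0.38])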
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7377049180327869\n", + "Average of Other Ratios: 0.2875\n", + "T-Statistic: -16.3510804808083\n", + "P-Value: 0.0004977534397666609\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3050847457627119\n", + "Average of Other Ratios: 0.2489406779661017\n", + "T-Statistic: -7.020021492444736\n", + "P-Value: 0.005937531487381006\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1224\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1225\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4426229508196721\n", + "Average of Other Ratios: 0.41250000000000003\n", + "T-Statistic: -1.9095681132275377\n", + "P-Value: 0.15219065910307153\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.328954802259887\n", + "T-Statistic: -3.1151717210383336\n", + "P-Value: 0.05266898293338368\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1225\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1226\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5573770491803278\n", + "Average of Other Ratios: 0.4041666666666667\n", + "T-Statistic: -4.560818191534335\n", + "P-Value: 0.019763254051599725\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.3122881355932204\n", + "T-Statistic: -3.467950511615431\n", + "P-Value: 0.04040635351163493\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1226\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1227\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7377049180327869\n", + "Average of Other Ratios: 0.35416666666666663\n", + "T-Statistic: -13.929800773742766\n", + "P-Value: 0.0008010077875496463\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.3260828625235405\n", + "T-Statistic: -3.6801457918777314\n", + "P-Value: 0.0665501469345416\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1227\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1228\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.639344262295082\n", + "Average of Other Ratios: 0.4083333333333333\n", + "T-Statistic: -7.498639437160465\n", + "P-Value: 0.004913539903160214\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.354590395480226\n", + "T-Statistic: -5.215051241964461\n", + "P-Value: 0.01370881147200009\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1228\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1229\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.43333333333333335\n", + "Average of Other Ratios: 0.36919398907103823\n", + "T-Statistic: -2.8757941403931446\n", + "P-Value: 0.06374762528542408\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3728813559322034\n", + "Average of Other Ratios: 0.3038135593220339\n", + "T-Statistic: -9.817377185403636\n", + "P-Value: 0.002246445968285984\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1229\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1230\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.28169398907103826\n", + "T-Statistic: -3.4441072444884826\n", + "P-Value: 0.04111271435550431\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3220338983050847\n", + "Average of Other Ratios: 0.26596045197740115\n", + "T-Statistic: -2.465458081419365\n", + "P-Value: 0.0904281089259112\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1230\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1231\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7166666666666667\n", + "Average of Other Ratios: 0.36133879781420764\n", + "T-Statistic: -10.501771543064775\n", + "P-Value: 0.0018436795460142676\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.36264124293785316\n", + "T-Statistic: -5.251461666642595\n", + "P-Value: 0.0134479218636803\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1231\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1232\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6557377049180327\n", + "Average of Other Ratios: 0.32916666666666666\n", + "T-Statistic: -20.7021431957843\n", + "P-Value: 0.00024648403432591493\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3728813559322034\n", + "Average of Other Ratios: 0.27443502824858756\n", + "T-Statistic: -3.458769578971492\n", + "P-Value: 0.04067649752950825\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1232\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1233\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6666666666666666\n", + "Average of Other Ratios: 0.3896857923497268\n", + "T-Statistic: -7.435895472354905\n", + "P-Value: 0.005033772413915748\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.34173728813559323\n", + "T-Statistic: -3.108570507260323\n", + "P-Value: 0.052940452969860205\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1233\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1234\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6721311475409836\n", + "Average of Other Ratios: 0.3541666666666667\n", + "T-Statistic: -5.824341929189313\n", + "P-Value: 0.01007998096990893\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.3220338983050847\n", + "T-Statistic: -3.55910413197229\n", + "P-Value: 0.037844293787820955\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1234\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1235\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.37752732240437153\n", + "T-Statistic: -6.922734884968511\n", + "P-Value: 0.0061793032033680305\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3559322033898305\n", + "Average of Other Ratios: 0.31652542372881354\n", + "T-Statistic: -2.9875904328450478\n", + "P-Value: 0.058242586475556184\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1235\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1236\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7377049180327869\n", + "Average of Other Ratios: 0.41666666666666663\n", + "T-Statistic: -9.080332982450201\n", + "P-Value: 0.002821767606719206\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.4096045197740113\n", + "T-Statistic: -2.5245779797628782\n", + "P-Value: 0.0858312019877726\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1236\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1237\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.38155737704918036\n", + "T-Statistic: -3.5810708052340003\n", + "P-Value: 0.03725808735441134\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3389830508474576\n", + "Average of Other Ratios: 0.3089453860640301\n", + "T-Statistic: -2.8553936637391883\n", + "P-Value: 0.10388662348248497\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1237\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1238\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5245901639344263\n", + "Average of Other Ratios: 0.37916666666666665\n", + "T-Statistic: -4.242857392252246\n", + "P-Value: 0.02397794903308461\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.3458333333333333\n", + "T-Statistic: -2.306948594944211\n", + "P-Value: 0.10432200362127822\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1238\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1239\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6557377049180327\n", + "Average of Other Ratios: 0.3625\n", + "T-Statistic: -6.133284818784408\n", + "P-Value: 0.008715961470550591\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.3038135593220339\n", + "T-Statistic: -7.137099922436499\n", + "P-Value: 0.005662825302674034\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1239\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1240\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.29494535519125686\n", + "T-Statistic: -3.242117432574401\n", + "P-Value: 0.04777413102072636\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3728813559322034\n", + "Average of Other Ratios: 0.18559322033898304\n", + "T-Statistic: -10.368238367141146\n", + "P-Value: 0.0019142559726229866\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1240\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1241\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5081967213114754\n", + "Average of Other Ratios: 0.38749999999999996\n", + "T-Statistic: -2.474836034730356\n", + "P-Value: 0.08967913937363099\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3728813559322034\n", + "Average of Other Ratios: 0.2786016949152542\n", + "T-Statistic: -6.156258728408069\n", + "P-Value: 0.008624465754537623\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1241\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1242\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.37363387978142076\n", + "T-Statistic: -3.3164206595199035\n", + "P-Value: 0.04517472690275175\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3728813559322034\n", + "Average of Other Ratios: 0.2956920903954802\n", + "T-Statistic: -2.834146743848625\n", + "P-Value: 0.06596378750306289\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1242\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1243\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6666666666666666\n", + "Average of Other Ratios: 0.4066256830601093\n", + "T-Statistic: -18.30718715197799\n", + "P-Value: 0.0003555990078420392\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.34187853107344635\n", + "T-Statistic: -3.891797498256559\n", + "P-Value: 0.030085204803105746\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1243\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1244\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6721311475409836\n", + "Average of Other Ratios: 0.3375\n", + "T-Statistic: -11.473067915690864\n", + "P-Value: 0.0014212819594170815\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.38333333333333336\n", + "Average of Other Ratios: 0.3389830508474576\n", + "T-Statistic: -2.266099806569284\n", + "P-Value: 0.1083141230940181\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1244\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1245\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.31058743169398906\n", + "T-Statistic: -4.4611858896576715\n", + "P-Value: 0.020972683531277587\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3220338983050847\n", + "Average of Other Ratios: 0.25741525423728817\n", + "T-Statistic: -2.4557675307968445\n", + "P-Value: 0.09121010163141306\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1245\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1246\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.4523224043715847\n", + "T-Statistic: -3.6924428701682177\n", + "P-Value: 0.034458070848123835\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.39258474576271185\n", + "T-Statistic: -3.5522131403755797\n", + "P-Value: 0.03803061001063327\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1246\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1247\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.8524590163934426\n", + "Average of Other Ratios: 0.3541666666666667\n", + "T-Statistic: -20.02461614309747\n", + "P-Value: 0.00027220292398855736\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.31207627118644066\n", + "T-Statistic: -5.440282569102571\n", + "P-Value: 0.012194130448693096\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1247\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1248\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6885245901639344\n", + "Average of Other Ratios: 0.3416666666666667\n", + "T-Statistic: -15.731996671646355\n", + "P-Value: 0.0005582617929085287\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.36666666666666664\n", + "Average of Other Ratios: 0.32627118644067793\n", + "T-Statistic: -3.788162541206027\n", + "P-Value: 0.032263411126476765\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1248\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1249\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.4019808743169399\n", + "T-Statistic: -3.3737593587415304\n", + "P-Value: 0.04329038177941591\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.38333333333333336\n", + "Average of Other Ratios: 0.34745762711864403\n", + "T-Statistic: -3.2791258997889505\n", + "P-Value: 0.046456412128927795\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1249\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1250\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.36092896174863387\n", + "T-Statistic: -4.924844065432519\n", + "P-Value: 0.016044059231596925\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3220338983050847\n", + "Average of Other Ratios: 0.25741525423728817\n", + "T-Statistic: -5.952990444986046\n", + "P-Value: 0.009480187104661204\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1250\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1251\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5901639344262295\n", + "Average of Other Ratios: 0.3666666666666667\n", + "T-Statistic: -4.511917593907792\n", + "P-Value: 0.020345223247711267\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3389830508474576\n", + "Average of Other Ratios: 0.3258003766478343\n", + "T-Statistic: -3.4999999999999902\n", + "P-Value: 0.07282735005446969\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1251\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1252\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.37759562841530053\n", + "T-Statistic: -3.483025090320139\n", + "P-Value: 0.039967726509492464\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3559322033898305\n", + "Average of Other Ratios: 0.2783898305084746\n", + "T-Statistic: -3.9414887073599294\n", + "P-Value: 0.029107725320715257\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1252\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1253\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6885245901639344\n", + "Average of Other Ratios: 0.35833333333333334\n", + "T-Statistic: -14.9760677256279\n", + "P-Value: 0.0006461747777244088\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4\n", + "Average of Other Ratios: 0.364406779661017\n", + "T-Statistic: -2.193378465041791\n", + "P-Value: 0.1158873271135981\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1253\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1254\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.47540983606557374\n", + "Average of Other Ratios: 0.35\n", + "T-Statistic: -4.022062624190254\n", + "P-Value: 0.027607848701928988\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3389830508474576\n", + "Average of Other Ratios: 0.28255649717514125\n", + "T-Statistic: -3.1504339973019846\n", + "P-Value: 0.05124820108543329\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1254\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1255\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.47691256830601103\n", + "T-Statistic: -3.022304945858794\n", + "P-Value: 0.0566554359290112\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.41377118644067795\n", + "T-Statistic: -2.677569682825962\n", + "P-Value: 0.07520419613663175\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1255\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1256\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.3237704918032787\n", + "T-Statistic: -4.585573326772214\n", + "P-Value: 0.019476865011623285\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3559322033898305\n", + "Average of Other Ratios: 0.24491525423728813\n", + "T-Statistic: -6.057634803744621\n", + "P-Value: 0.00902640078253155\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1256\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1257\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7377049180327869\n", + "Average of Other Ratios: 0.37916666666666665\n", + "T-Statistic: -6.896820529493841\n", + "P-Value: 0.006245884135338378\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.29117231638418084\n", + "T-Statistic: -4.3575806009385145\n", + "P-Value: 0.022334035979091265\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1257\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1258\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5245901639344263\n", + "Average of Other Ratios: 0.38749999999999996\n", + "T-Statistic: -2.4730386527896364\n", + "P-Value: 0.0898220971946934\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.32916666666666666\n", + "T-Statistic: -10.509267049042842\n", + "P-Value: 0.001839820961868861\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1258\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1259\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + },
[Per-document stream output for documents 1259-1276, identical in structure to the documents shown before and after this span: each model load re-emits the same bert-base-uncased checkpoint warning, and each document produces two stdout reports giving the Highest Match Ratio, Average of Other Ratios, T-Statistic, P-Value, and whether the highest ratio is significantly different from the others, delimited by "Doing N" / "Done N" banners.]
+ { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6557377049180327\n", + "Average of Other Ratios: 0.4125\n", + "T-Statistic: -4.719504703575856\n", + "P-Value: 0.01801737147532206\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5423728813559322\n", + "Average of Other Ratios: 0.3375\n", + "T-Statistic: -8.30959245705665\n", + "P-Value: 0.0036520779681317537\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1277\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1278\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5245901639344263\n", + "Average of Other Ratios: 0.30000000000000004\n", + "T-Statistic: -4.334146739196724\n", + "P-Value: 0.022657696393709254\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.288135593220339\n", + "Average of Other Ratios: 0.22351694915254236\n", + "T-Statistic: -3.3098123366183496\n", + "P-Value: 0.04539853318110197\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1278\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1279\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5409836065573771\n", + "Average of Other Ratios: 0.4\n", + "T-Statistic: -2.990713926985726\n", + "P-Value: 0.05809751373410958\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.3161723163841808\n", + "T-Statistic: -2.483483730018013\n", + "P-Value: 0.0889952126349582\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1279\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1280\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5081967213114754\n", + "Average of Other Ratios: 0.30416666666666664\n", + "T-Statistic: -5.440801457194902\n", + "P-Value: 0.012190900140945336\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.31666666666666665\n", + "Average of Other Ratios: 0.2754237288135593\n", + "T-Statistic: -3.8676344826299247\n", + "P-Value: 0.030575765124777585\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1280\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1281\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.3399590163934426\n", + "T-Statistic: -4.334577337946331\n", + "P-Value: 0.022651694585833295\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.35\n", + "Average of Other Ratios: 0.2966101694915254\n", + "T-Statistic: -3.6373066958946425\n", + "P-Value: 0.03580932390070352\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1281\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1282\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.3108606557377049\n", + "T-Statistic: -2.3956053387350558\n", + "P-Value: 0.0962542828406371\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3\n", + "Average of Other Ratios: 0.2584745762711864\n", + "T-Statistic: -2.869146214468688\n", + "P-Value: 0.06409507369957781\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1282\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1283\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.4145491803278689\n", + "T-Statistic: -2.833861411958958\n", + "P-Value: 0.0659792994727473\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.3423728813559322\n", + "T-Statistic: -2.203747318127119\n", + "P-Value: 0.15839077808830893\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1283\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1284\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5737704918032787\n", + "Average of Other Ratios: 0.22916666666666669\n", + "T-Statistic: -8.901067221690772\n", + "P-Value: 0.0029905765083464166\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.288135593220339\n", + "Average of Other Ratios: 0.2278954802259887\n", + "T-Statistic: -2.1351104338488116\n", + "P-Value: 0.12241769751046185\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1284\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1285\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4426229508196721\n", + "Average of Other Ratios: 0.33749999999999997\n", + "T-Statistic: -2.4314657198035565\n", + "P-Value: 0.09320783628411884\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3389830508474576\n", + "Average of Other Ratios: 0.28269774011299437\n", + "T-Statistic: -3.521966119583988\n", + "P-Value: 0.03886239529216442\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1285\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1286\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.27329234972677596\n", + "T-Statistic: -4.855988855019134\n", + "P-Value: 0.016672924204011234\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23728813559322035\n", + "Average of Other Ratios: 0.19077212806026367\n", + "T-Statistic: -1.9859386750277492\n", + "P-Value: 0.1854304501080007\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1286\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1287\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.25341530054644806\n", + "T-Statistic: -13.539511357945873\n", + "P-Value: 0.0008713612072207959\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2542372881355932\n", + "Average of Other Ratios: 0.21942090395480224\n", + "T-Statistic: -37.92307692307706\n", + "P-Value: 4.033425013157966e-05\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1287\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1288\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7049180327868853\n", + "Average of Other Ratios: 0.3208333333333333\n", + "T-Statistic: -11.206006160936766\n", + "P-Value: 0.0015233714589831572\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3728813559322034\n", + "Average of Other Ratios: 0.2909604519774011\n", + "T-Statistic: -2.6148419405355545\n", + "P-Value: 0.07935310501190229\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1288\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1289\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.639344262295082\n", + "Average of Other Ratios: 0.3416666666666667\n", + "T-Statistic: -3.5780996247983943\n", + "P-Value: 0.037336695763226686\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4166666666666667\n", + "Average of Other Ratios: 0.31779661016949157\n", + "T-Statistic: -6.831300510639733\n", + "P-Value: 0.0064184892478205335\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1289\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1290\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.639344262295082\n", + "Average of Other Ratios: 0.29583333333333334\n", + "T-Statistic: -13.315684846477943\n", + "P-Value: 0.0009154444688870522\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.23241525423728815\n", + "T-Statistic: -4.29193057069012\n", + "P-Value: 0.02325623974857282\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1290\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1291\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
Notebook output for items 1291 through 1310, condensed. Before every run the Hugging Face loader prints the standard warning that some weights of the bert-base-uncased checkpoint ('bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight') are not used when initializing BertForMaskedLM, which is expected when loading a checkpoint trained on another task or architecture. Each item then logs two comparisons of the highest match ratio against the average of the other candidate ratios, and a comparison is reported as significant when the t-test p-value falls below 0.05. Values below are rounded to four decimal places; a minimal sketch of how such a check could be computed follows the table.

Item   Highest ratio   Avg. other ratios   t-statistic   p-value   Significant (p < 0.05)
1291   0.6557          0.3875              -11.6888      0.0013    yes
1291   0.3898          0.3377              -3.1734       0.0503    no
1292   0.5667          0.3317              -5.9679       0.0094    yes
1292   0.3220          0.2364              -3.3773       0.0432    yes
1293   0.5738          0.4000              -3.3534       0.0439    yes
1293   0.5254          0.3247              -5.6435       0.0110    yes
1294   0.5667          0.3897              -3.9226       0.0295    yes
1294   0.4407          0.3461              -7.8736       0.0043    yes
1295   0.5410          0.3708              -2.9809       0.0586    no
1295   0.4237          0.3162              -2.8251       0.0665    no
1296   0.4833          0.3403              -2.9522       0.0599    no
1296   0.3390          0.2662              -2.3379       0.1014    no
1297   0.7167          0.2654              -16.0386      0.0005    yes
1297   0.2542          0.1898              -3.2917       0.0460    yes
1298   0.4262          0.3083              -3.8269       0.0314    yes
1298   0.3390          0.2574              -3.8325       0.0313    yes
1299   0.5667          0.4144              -3.4211       0.0418    yes
1299   0.5763          0.3290              -9.5426       0.0024    yes
1300   0.6393          0.3292              -4.8492       0.0167    yes
1300   0.3390          0.2786              -2.3407       0.1012    no
1301   0.5000          0.3152              -17.6918      0.0004    yes
1301   0.3220          0.2701              -7.4136       0.0051    yes
1302   0.5902          0.3583              -11.6866      0.0013    yes
1302   0.3559          0.3080              -3.8892       0.0301    yes
1303   0.5410          0.4000              -2.8734       0.0639    no
1303   0.3729          0.3025              -27.6667      0.0230    yes
1304   0.4833          0.4064              -1.9862       0.1412    no
1304   0.3729          0.2746              -3.0030       0.0575    no
1305   0.5667          0.3190              -5.8275       0.0101    yes
1305   0.2712          0.2446              -2.6315       0.0782    no
1306   0.5167          0.3939              -3.0276       0.0564    no
1306   0.4333          0.3432              -2.8168       0.0669    no
1307   0.4833          0.3359              -4.7344       0.0179    yes
1307   0.4500          0.3178              -5.0393       0.0151    yes
1308   0.6167          0.3656              -5.1288       0.0144    yes
1308   0.3390          0.2701              -3.9730       0.0285    yes
1309   0.4833          0.3567              -7.8831       0.0043    yes
1309   0.4576          0.2952              -6.3402       0.0079    yes
1310   0.6721          0.3417              -9.4347       0.0025    yes
1310   0.3729          0.2616              -3.2568       0.0472    yes
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.3041666666666667\n", + "T-Statistic: -10.096584056706222\n", + "P-Value: 0.0020692799372390666\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3728813559322034\n", + "Average of Other Ratios: 0.2699152542372881\n", + "T-Statistic: -5.1240286090443545\n", + "P-Value: 0.014390539846078056\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1311\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1312\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4918032786885246\n", + "Average of Other Ratios: 0.43333333333333335\n", + "T-Statistic: -3.508196721311475\n", + "P-Value: 0.0392487316994922\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.37563559322033896\n", + "T-Statistic: -2.224345699469886\n", + "P-Value: 0.11258691448891019\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1312\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1313\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5409836065573771\n", + "Average of Other Ratios: 0.35\n", + "T-Statistic: -4.813749731486455\n", + "P-Value: 0.01707463310186129\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3728813559322034\n", + "Average of Other Ratios: 0.29783427495291903\n", + "T-Statistic: -3.5957489244818297\n", + "P-Value: 0.06938932086375887\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1313\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1314\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5245901639344263\n", + "Average of Other Ratios: 0.29166666666666663\n", + "T-Statistic: -7.216870606867377\n", + "P-Value: 0.005485187056437409\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23728813559322035\n", + "Average of Other Ratios: 0.18568738229755177\n", + "T-Statistic: -1.9376663567761192\n", + "P-Value: 0.19225685599558717\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1314\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1315\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5737704918032787\n", + "Average of Other Ratios: 0.3375\n", + "T-Statistic: -6.518811541138283\n", + "P-Value: 0.007334069087384167\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3333333333333333\n", + "Average of Other Ratios: 0.2796610169491526\n", + "T-Statistic: -3.6565517048676255\n", + "P-Value: 0.035330102302749224\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1315\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1316\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5409836065573771\n", + "Average of Other Ratios: 0.22499999999999998\n", + "T-Statistic: -7.68678962084268\n", + "P-Value: 0.004574973719624272\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.26666666666666666\n", + "Average of Other Ratios: 0.1906779661016949\n", + "T-Statistic: -2.7138510050026126\n", + "P-Value: 0.07292684266802282\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1316\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1317\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6885245901639344\n", + "Average of Other Ratios: 0.39166666666666666\n", + "T-Statistic: -9.409277800552035\n", + "P-Value: 0.0025434230249447316\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.37570621468926557\n", + "T-Statistic: -2.8482596056990577\n", + "P-Value: 0.06520219060265608\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1317\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1318\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5245901639344263\n", + "Average of Other Ratios: 0.3541666666666667\n", + "T-Statistic: -21.360184505383586\n", + "P-Value: 0.00022451203065569885\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3728813559322034\n", + "Average of Other Ratios: 0.29103107344632767\n", + "T-Statistic: -4.887164066279016\n", + "P-Value: 0.016384294164034276\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1318\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1319\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.47540983606557374\n", + "Average of Other Ratios: 0.2125\n", + "T-Statistic: -6.081034149138327\n", + "P-Value: 0.008928849971299894\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23333333333333334\n", + "Average of Other Ratios: 0.1483050847457627\n", + "T-Statistic: -2.866666666666667\n", + "P-Value: 0.06422527059785652\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1319\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1320\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.3603142076502732\n", + "T-Statistic: -2.9539865796064393\n", + "P-Value: 0.05983245019134663\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3728813559322034\n", + "Average of Other Ratios: 0.29209039548022603\n", + "T-Statistic: -7.654614504282301\n", + "P-Value: 0.016642001057945596\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1320\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1321\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6885245901639344\n", + "Average of Other Ratios: 0.33333333333333337\n", + "T-Statistic: -8.952610373293831\n", + "P-Value: 0.0029407013393304916\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.2913135593220339\n", + "T-Statistic: -4.79001040774592\n", + "P-Value: 0.017305937126765883\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1321\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1322\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.39849726775956285\n", + "T-Statistic: -6.830325200725934\n", + "P-Value: 0.00642110575024065\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.28700564971751413\n", + "T-Statistic: -6.922217740406311\n", + "P-Value: 0.006180622722235997\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1322\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1323\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5245901639344263\n", + "Average of Other Ratios: 0.4\n", + "T-Statistic: -3.737704918032786\n", + "P-Value: 0.03339722912081489\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.3292372881355933\n", + "T-Statistic: -6.337740226443206\n", + "P-Value: 0.007944216283270866\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1323\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1324\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.47540983606557374\n", + "Average of Other Ratios: 0.2791666666666667\n", + "T-Statistic: -11.423030339825978\n", + "P-Value: 0.00143970284382488\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2711864406779661\n", + "Average of Other Ratios: 0.2192090395480226\n", + "T-Statistic: -2.472975320624164\n", + "P-Value: 0.08982713951273004\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1324\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1325\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).
- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).

For every record the cell repeatedly prints the same stderr notice from Transformers (the pooler and seq_relationship weights of bert-base-uncased are unused when initializing BertForMaskedLM, which is expected for masked-LM inference), followed by two stdout result blocks. Results for records 1325-1345:

record  block  highest match ratio  avg. of other ratios  t-statistic  p-value   significantly different (p < 0.05)
1325    1      0.6500               0.3942                -4.965       0.0157    yes
1325    2      0.4407               0.3332                -6.046       0.0091    yes
1326    1      0.5500               0.3605                -3.368       0.0435    yes
1326    2      0.3729               0.2698                -2.646       0.0772    no
1327    1      0.5574               0.3750                -3.337       0.0445    yes
1327    2      0.4237               0.3080                -6.245       0.0083    yes
1328    1      0.6500               0.3400                -9.899       0.0022    yes
1328    2      0.3898               0.2994                -4.543       0.0200    yes
1329    1      0.5082               0.4042                -3.473       0.0402    yes
1329    2      0.4667               0.3305                -3.092       0.0536    no
1330    1      0.5167               0.3319                -10.491      0.0018    yes
1330    2      0.3390               0.2662                -2.088       0.1280    no
1331    1      0.6667               0.3066                -8.899       0.0030    yes
1331    2      0.4237               0.2994                -2.535       0.0851    no
1332    1      0.6885               0.2708                -11.733      0.0013    yes
1332    2      0.3390               0.2617                -4.608       0.0192    yes
1333    1      0.7049               0.4000                -10.563      0.0018    yes
1333    2      0.4576               0.3416                -3.627       0.0361    yes
1334    1      0.4833               0.4628                -5.357       0.1175    no
1334    2      0.4915               0.3802                -2.535       0.0850    no
1335    1      0.5667               0.3734                -4.611       0.0192    yes
1335    2      0.4237               0.3209                -3.425       0.0417    yes
1336    1      0.5333               0.3980                -2.348       0.1004    no
1336    2      0.4068               0.3545                -4.111       0.0261    yes
1337    1      0.6557               0.3708                -5.480       0.0119    yes
1337    2      0.5254               0.3246                -6.742       0.0067    yes
1338    1      0.4754               0.3333                -3.184       0.0499    yes
1338    2      0.3667               0.2881                -2.675       0.0754    no
1339    1      0.6557               0.4083                -4.599       0.0193    yes
1339    2      0.4407               0.3627                -3.644       0.0356    yes
1340    1      0.6393               0.3583                -10.846      0.0017    yes
1340    2      0.4068               0.2824                -3.041       0.0558    no
1341    1      0.4918               0.2833                -5.896       0.0097    yes
1341    2      0.3051               0.2320                -6.013       0.0092    yes
1342    1      0.6167               0.3812                -3.798       0.0321    yes
1342    2      0.4068               0.3458                -2.431       0.0933    no
1343    1      0.6721               0.4292                -30.452      7.8e-05   yes
1343    2      0.4746               0.4009                -2.985       0.0584    no
1344    1      0.5574               0.2250                -23.028      1.8e-04   yes
1344    2      0.2881               0.2192                -4.357       0.0223    yes
1345    1      0.4426               0.2958                -3.203       0.0492    yes
1345    2      0.2881               0.2446                -2.610       0.0797    no

The loop then moves on to record 1346, again printing the Transformers notice before each run.
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6666666666666666\n", + "Average of Other Ratios: 0.4817622950819672\n", + "T-Statistic: -4.70814887470562\n", + "P-Value: 0.018135489628524907\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.47238700564971753\n", + "T-Statistic: -1.8841000222390059\n", + "P-Value: 0.15606129783472963\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1346\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1347\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6721311475409836\n", + "Average of Other Ratios: 0.3583333333333333\n", + "T-Statistic: -9.722669668020147\n", + "P-Value: 0.0023110927865077316\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3461864406779661\n", + "T-Statistic: -5.059912204313382\n", + "P-Value: 0.014897489104880094\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1347\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1348\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5573770491803278\n", + "Average of Other Ratios: 0.32083333333333336\n", + "T-Statistic: -4.437542626227876\n", + "P-Value: 0.021273677300139238\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.3093220338983051\n", + "T-Statistic: -3.3757575757575755\n", + "P-Value: 0.04322652987968803\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1348\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1349\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6833333333333333\n", + "Average of Other Ratios: 0.4314890710382514\n", + "T-Statistic: -9.676632078012846\n", + "P-Value: 0.0023434092465502462\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.3797316384180791\n", + "T-Statistic: -2.9703710359097775\n", + "P-Value: 0.05905056214606797\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1349\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1350\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5081967213114754\n", + "Average of Other Ratios: 0.3208333333333333\n", + "T-Statistic: -3.604111013983922\n", + "P-Value: 0.03665562896226882\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3559322033898305\n", + "Average of Other Ratios: 0.2362523540489642\n", + "T-Statistic: -3.955281402843921\n", + "P-Value: 0.05837978400303097\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1350\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1351\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7704918032786885\n", + "Average of Other Ratios: 0.3208333333333333\n", + "T-Statistic: -21.583606557377053\n", + "P-Value: 0.00021764701504013405\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3389830508474576\n", + "Average of Other Ratios: 0.29764595103578156\n", + "T-Statistic: -4.346534653465347\n", + "P-Value: 0.04906822409381993\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1351\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1352\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5901639344262295\n", + "Average of Other Ratios: 0.41666666666666663\n", + "T-Statistic: -3.2125451549484705\n", + "P-Value: 0.04886123097830394\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3559322033898305\n", + "Average of Other Ratios: 0.2911723163841808\n", + "T-Statistic: -4.423761578539075\n", + "P-Value: 0.021451702572010452\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1352\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1353\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5573770491803278\n", + "Average of Other Ratios: 0.3208333333333333\n", + "T-Statistic: -8.591077485213033\n", + "P-Value: 0.0033154323787321117\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3728813559322034\n", + "Average of Other Ratios: 0.27838983050847455\n", + "T-Statistic: -3.7753252988406856\n", + "P-Value: 0.03254713415412944\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1353\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1354\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.38545081967213113\n", + "T-Statistic: -2.9267563949002504\n", + "P-Value: 0.06116082615925972\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.3586158192090395\n", + "T-Statistic: -2.541573388991179\n", + "P-Value: 0.08456310051618006\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1354\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1355\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.3236338797814207\n", + "T-Statistic: -13.636558407121642\n", + "P-Value: 0.000853123683257044\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3220338983050847\n", + "Average of Other Ratios: 0.2318502824858757\n", + "T-Statistic: -2.7486274334487737\n", + "P-Value: 0.07082370041149343\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1355\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1356\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.36516393442622946\n", + "T-Statistic: -8.46907603191196\n", + "P-Value: 0.0034560758139621692\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3559322033898305\n", + "Average of Other Ratios: 0.29936440677966103\n", + "T-Statistic: -2.1631195885543937\n", + "P-Value: 0.11922505386124203\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1356\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1357\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.36065573770491804\n", + "T-Statistic: -3.1934829897182784\n", + "P-Value: 0.04957857968168316\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.30508474576271183\n", + "T-Statistic: -12.091525958289973\n", + "P-Value: 0.0012174072403681394\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1357\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1358\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.3235655737704918\n", + "T-Statistic: -28.52854660180471\n", + "P-Value: 9.456145255444976e-05\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3728813559322034\n", + "Average of Other Ratios: 0.29103107344632767\n", + "T-Statistic: -3.7674177162295606\n", + "P-Value: 0.03272349692653532\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1358\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1359\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
Cell output (documents 1359–1380): before every run the Hugging Face loader prints its standard notice that the `bert.pooler.*` and `cls.seq_relationship.*` weights of the `bert-base-uncased` checkpoint are not used when initializing `BertForMaskedLM`, which is expected when a checkpoint trained on another task or architecture is loaded. Each document is processed twice; for each run the notebook prints the highest match ratio, the average of the other ratios, the t-statistic and p-value of their comparison, and whether the highest ratio is significantly different from the others:

| Document | Highest match ratio | Average of other ratios | t-statistic | p-value | Significantly different (p < 0.05)? |
| --- | --- | --- | --- | --- | --- |
| 1359 | 0.5409836065573771 | 0.4291666666666667 | -2.0643127364438847 | 0.13095346493752535 | no |
| 1359 | 0.423728813559322 | 0.35861581920903957 | -3.110819906677478 | 0.05284775046942044 | no |
| 1360 | 0.45 | 0.38148907103825136 | -1.9632892320372335 | 0.1443881711602152 | no |
| 1360 | 0.3220338983050847 | 0.2809792843691149 | -5.736842105263147 | 0.02906640150529952 | yes |
| 1361 | 0.65 | 0.3028005464480874 | -21.112622602464683 | 0.00023245973293610858 | yes |
| 1361 | 0.3050847457627119 | 0.2526365348399247 | -3.4171779141104293 | 0.07600312612521545 | no |
| 1362 | 0.5081967213114754 | 0.37083333333333335 | -3.54807654763531 | 0.038143015996558144 | yes |
| 1362 | 0.4166666666666667 | 0.3220338983050848 | -3.53121005385469 | 0.03860574928821704 | yes |
| 1363 | 0.5333333333333333 | 0.38155737704918036 | -4.0869440858831725 | 0.026471716786619498 | yes |
| 1363 | 0.3559322033898305 | 0.314406779661017 | -1.800059978008079 | 0.2136563487360759 | no |
| 1364 | 0.6166666666666667 | 0.3941256830601093 | -5.534321299197799 | 0.011626515414883042 | yes |
| 1364 | 0.4406779661016949 | 0.3840395480225989 | -2.2968348396222584 | 0.10529360084626542 | no |
| 1365 | 0.5081967213114754 | 0.29583333333333334 | -9.689747679740668 | 0.002334141978546938 | yes |
| 1365 | 0.3050847457627119 | 0.2153954802259887 | -4.628137283451674 | 0.01899691387142354 | yes |
| 1366 | 0.45901639344262296 | 0.32083333333333336 | -2.9505777976622465 | 0.059996749206154006 | no |
| 1366 | 0.3389830508474576 | 0.2701271186440678 | -2.5462474269165756 | 0.08421839435807502 | no |
| 1367 | 0.5737704918032787 | 0.3375 | -3.780327868852458 | 0.03243619070712851 | yes |
| 1367 | 0.36666666666666664 | 0.288135593220339 | -3.421943449225448 | 0.04178346889141796 | yes |
| 1368 | 0.5573770491803278 | 0.375 | -3.8488075064770464 | 0.03096513128016824 | yes |
| 1368 | 0.3559322033898305 | 0.29992937853107343 | -1.9390607807005154 | 0.1478476458083794 | no |
| 1369 | 0.45901639344262296 | 0.3416666666666667 | -4.878136536726772 | 0.01646720054964036 | yes |
| 1369 | 0.3559322033898305 | 0.26935028248587567 | -1.643431635388741 | 0.34799773853532184 | no |
| 1370 | 0.4666666666666667 | 0.3066256830601093 | -3.3297523730648932 | 0.044727459212230945 | yes |
| 1370 | 0.3220338983050847 | 0.22761299435028248 | -4.4183200488016485 | 0.021522528086765566 | yes |
| 1371 | 0.43333333333333335 | 0.35252732240437157 | -2.5701626159312543 | 0.08248147008467596 | no |
| 1371 | 0.423728813559322 | 0.2528248587570622 | -5.471809142918229 | 0.011999877727690598 | yes |
| 1372 | 0.639344262295082 | 0.36666666666666664 | -5.403746742275941 | 0.012424423736674949 | yes |
| 1372 | 0.4166666666666667 | 0.3093220338983051 | -13.229901852633054 | 0.0009331248455100845 | yes |
| 1373 | 0.6166666666666667 | 0.41857923497267757 | -2.837173206379461 | 0.06579953452896789 | no |
| 1373 | 0.4166666666666667 | 0.3474576271186441 | -4.715027198381945 | 0.018063824737018258 | yes |
| 1374 | 0.43333333333333335 | 0.2981785063752277 | -9.104294478527612 | 0.011850419592490571 | yes |
| 1374 | 0.3050847457627119 | 0.2699858757062147 | -5.869851287372005 | 0.009862263796097724 | yes |
| 1375 | 0.55 | 0.3736338797814208 | -3.6168425013226613 | 0.03632805703920555 | yes |
| 1375 | 0.36666666666666664 | 0.2923728813559322 | -5.133234383709284 | 0.014319609315535732 | yes |
| 1376 | 0.7377049180327869 | 0.3458333333333333 | -9.297077569799361 | 0.0026341027851884773 | yes |
| 1376 | 0.3728813559322034 | 0.3040960451977401 | -2.8752211601887643 | 0.06377747883646023 | no |
| 1377 | 0.5901639344262295 | 0.4041666666666667 | -3.0118665582406012 | 0.05712690752613409 | no |
| 1377 | 0.3728813559322034 | 0.3248587570621469 | -1.9896995023342197 | 0.14072495503830776 | no |
| 1378 | 0.45 | 0.3567622950819672 | -2.922322679137377 | 0.061380599938098745 | no |
| 1378 | 0.3559322033898305 | 0.2953389830508475 | -4.1824309711049805 | 0.024906888121385386 | yes |
| 1379 | 0.5666666666666667 | 0.3567622950819672 | -8.560154259731128 | 0.003350353702966942 | yes |
| 1379 | 0.3898305084745763 | 0.27429378531073445 | -4.389736277617274 | 0.021899581152473327 | yes |

The output for document 1380 and the remaining documents continues in the same pattern.
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4426229508196721\n", + "Average of Other Ratios: 0.3958333333333333\n", + "T-Statistic: -2.1349234997177082\n", + "P-Value: 0.12243934625158044\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.30360169491525424\n", + "T-Statistic: -6.426920856371511\n", + "P-Value: 0.007635697442918866\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1380\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1381\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.3274590163934426\n", + "T-Statistic: -7.792967812252545\n", + "P-Value: 0.004397428835019784\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3559322033898305\n", + "Average of Other Ratios: 0.2955508474576271\n", + "T-Statistic: -3.519233773599044\n", + "P-Value: 0.03893867159865646\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1381\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1382\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4262295081967213\n", + "Average of Other Ratios: 0.35833333333333334\n", + "T-Statistic: -2.3199911010934016\n", + "P-Value: 0.10308502717775488\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.3082627118644068\n", + "T-Statistic: -4.022350177849458\n", + "P-Value: 0.02760267623485285\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1382\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1383\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.38155737704918036\n", + "T-Statistic: -4.19677462214937\n", + "P-Value: 0.02468223424934266\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.34821092278719395\n", + "T-Statistic: -4.510204081632659\n", + "P-Value: 0.045808057902316804\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1383\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1384\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5901639344262295\n", + "Average of Other Ratios: 0.35833333333333334\n", + "T-Statistic: -9.637017116211096\n", + "P-Value: 0.002371698207192692\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.3258003766478343\n", + "T-Statistic: -4.913538149119954\n", + "P-Value: 0.039012347759055314\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1384\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1385\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.28724954462659374\n", + "T-Statistic: -20.478625837697965\n", + "P-Value: 0.0023760107227863043\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3050847457627119\n", + "Average of Other Ratios: 0.24887005649717514\n", + "T-Statistic: -3.297939899442541\n", + "P-Value: 0.04580416394678644\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1385\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1386\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6333333333333333\n", + "Average of Other Ratios: 0.39419398907103825\n", + "T-Statistic: -23.001886718608972\n", + "P-Value: 0.00017998371795611563\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.29209039548022603\n", + "T-Statistic: -9.26047768699887\n", + "P-Value: 0.011460849730983888\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1386\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1387\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.639344262295082\n", + "Average of Other Ratios: 0.32916666666666666\n", + "T-Statistic: -6.6941484555943065\n", + "P-Value: 0.006800654936853843\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.3250706214689265\n", + "T-Statistic: -4.167794334654786\n", + "P-Value: 0.025138858812902672\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1387\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1388\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7049180327868853\n", + "Average of Other Ratios: 0.37916666666666665\n", + "T-Statistic: -10.355237394824906\n", + "P-Value: 0.001921317535318784\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.3121468926553672\n", + "T-Statistic: -3.4545091251635194\n", + "P-Value: 0.04080264000619044\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1388\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1389\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.4269808743169399\n", + "T-Statistic: -3.4667216700978054\n", + "P-Value: 0.04044237885506118\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.385593220338983\n", + "T-Statistic: -5.066666666666667\n", + "P-Value: 0.014842997288171941\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1389\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1390\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.3236338797814208\n", + "T-Statistic: -3.3358653417919237\n", + "P-Value: 0.04452425266445473\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3559322033898305\n", + "Average of Other Ratios: 0.2530367231638418\n", + "T-Statistic: -3.0699274770848146\n", + "P-Value: 0.05456547495842803\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1390\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1391\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.28565573770491803\n", + "T-Statistic: -2.3900038580000214\n", + "P-Value: 0.0967410413896404\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3050847457627119\n", + "Average of Other Ratios: 0.2402542372881356\n", + "T-Statistic: -2.253658340717321\n", + "P-Value: 0.10956651809067314\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1391\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1392\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7\n", + "Average of Other Ratios: 0.3323087431693989\n", + "T-Statistic: -10.546963974844278\n", + "P-Value: 0.001820576611444715\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4\n", + "Average of Other Ratios: 0.3220338983050848\n", + "T-Statistic: -2.732807218615621\n", + "P-Value: 0.07177097972724268\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1392\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1393\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + },

[Notebook output condensed. For each document the loop prints a "Doing N" banner, two blocks of "Highest Match Ratio", "Average of Other Ratios", "T-Statistic" and "P-Value" with a significance verdict (reported as significant when p < 0.05), and a "Done N" banner. The bert-base-uncased warning "Some weights of the model checkpoint ... were not used when initializing BertForMaskedLM" is emitted verbatim before every model initialization and is not repeated here. Rounded results for documents 1393-1413 (full precision in the original cell output):

Doc  | Highest(1) AvgOthers(1) t(1)    p(1)   sig(1) | Highest(2) AvgOthers(2) t(2)    p(2)   sig(2)
1393 | 0.5738     0.3917       -12.62  0.0011 yes    | 0.4068     0.3207       -7.14   0.0057 yes
1394 | 0.4167     0.3315       -2.94   0.0986 no     | 0.3051     0.2415       -5.96   0.0270 yes
1395 | 0.8197     0.3250       -13.39  0.0009 yes    | 0.3898     0.2915       -3.42   0.0419 yes
1396 | 0.4833     0.1951       -6.74   0.0067 yes    | 0.1864     0.1350       -2.26   0.1091 no
1397 | 0.7213     0.3167       -9.78   0.0023 yes    | 0.3051     0.2364       -3.23   0.0481 yes
1398 | 0.5333     0.2782       -6.84   0.0064 yes    | 0.3559     0.2743       -2.91   0.0621 no
1399 | 0.6833     0.3106       -7.00   0.0060 yes    | 0.2881     0.2405       -2.37   0.0989 no
1400 | 0.6500     0.2775       -7.82   0.0044 yes    | 0.3559     0.2492       -3.46   0.0408 yes
1401 | 0.6066     0.3792       -3.75   0.0331 yes    | 0.4237     0.3461       -2.89   0.0628 no
1402 | 0.5246     0.4167       -5.02   0.0153 yes    | 0.3898     0.3420       -2.39   0.0963 no
1403 | 0.5000     0.3396       -3.02   0.0568 no     | 0.3559     0.2785       -5.18   0.0140 yes
1404 | 0.6066     0.3542       -6.42   0.0077 yes    | 0.4333     0.3686       -4.47   0.0209 yes
1405 | 0.5500     0.4270       -3.20   0.0492 yes    | 0.4237     0.3755       -3.42   0.0417 yes
1406 | 0.4500     0.3900       -3.13   0.0521 no     | 0.4915     0.3121       -9.14   0.0028 yes
1407 | 0.7000     0.4400       -7.03   0.0059 yes    | 0.4746     0.3969       -2.53   0.0855 no
1408 | 0.4167     0.3361       -6.66   0.0069 yes    | 0.4000     0.2500       -14.07  0.0008 yes
1409 | 0.6000     0.4112       -4.74   0.0178 yes    | 0.4237     0.3584       -3.17   0.0505 no
1410 | 0.7377     0.3917       -11.23  0.0015 yes    | 0.4746     0.3885       -3.08   0.0541 no
1411 | 0.6500     0.4361       -4.90   0.0163 yes    | 0.4746     0.4053       -3.15   0.0512 no
1412 | 0.5000     0.4107       -3.34   0.0444 yes    | 0.3729     0.2996       -6.93   0.0061 yes
1413 | 0.5738     0.4083       -3.97   0.0286 yes    | 0.3729     0.3418       -6.35   0.0079 yes

Document 1414 then begins:]

+ "Doing 1414\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.639344262295082\n", + "Average of Other Ratios: 0.375\n", + "T-Statistic: -4.954036545154963\n", + "P-Value: 0.015786763366299392\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.34209039548022596\n", + "T-Statistic: -5.7423119859693115\n", + "P-Value: 0.010488586403531558\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1414\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1415\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.439275956284153\n", + "T-Statistic: -2.5509371831068104\n", + "P-Value: 0.0838742637314232\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.3686440677966102\n", + "T-Statistic: -6.342784542369049\n", + "P-Value: 0.007926332946572029\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1415\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1416\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6721311475409836\n", + "Average of Other Ratios: 0.37083333333333335\n", + "T-Statistic: -12.587812138042343\n", + "P-Value: 0.001081035521070716\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.3\n", + "T-Statistic: -4.012310343727843\n", + "P-Value: 0.027784012310922687\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1416\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1417\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6721311475409836\n", + "Average of Other Ratios: 0.42083333333333334\n", + "T-Statistic: -13.599864280438059\n", + "P-Value: 0.000859959220110807\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.4219632768361582\n", + "T-Statistic: -9.741023075752745\n", + "P-Value: 0.0022983741002138604\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1417\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1418\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.49330601092896176\n", + "T-Statistic: -2.4421197772396783\n", + "P-Value: 0.09232550216594695\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.4518361581920904\n", + "T-Statistic: -2.3115409557535824\n", + "P-Value: 0.10388441475596431\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1418\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1419\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.43333333333333335\n", + "Average of Other Ratios: 0.3809653916211293\n", + "T-Statistic: -1.5855147752916146\n", + "P-Value: 0.25372930058134713\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3728813559322034\n", + "Average of Other Ratios: 0.31257062146892656\n", + "T-Statistic: -2.208880071004421\n", + "P-Value: 0.11422087778664086\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1419\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1420\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7213114754098361\n", + "Average of Other Ratios: 0.32083333333333336\n", + "T-Statistic: -18.273075151598423\n", + "P-Value: 0.0003575799805904938\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.3671610169491525\n", + "T-Statistic: -3.9629629629629703\n", + "P-Value: 0.02869794241127541\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1420\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1421\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.4062841530054645\n", + "T-Statistic: -6.551255287390875\n", + "P-Value: 0.007231321491454494\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.3545197740112994\n", + "T-Statistic: -4.900769721140663\n", + "P-Value: 0.01626036695364968\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1421\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1422\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5409836065573771\n", + "Average of Other Ratios: 0.425\n", + "T-Statistic: -3.0865517657726187\n", + "P-Value: 0.05385879831750464\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.48333333333333334\n", + "Average of Other Ratios: 0.4194915254237288\n", + "T-Statistic: -2.0960998088359606\n", + "P-Value: 0.12703618205803208\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1422\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1423\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.819672131147541\n", + "Average of Other Ratios: 0.3875\n", + "T-Statistic: -15.69614416660204\n", + "P-Value: 0.000562059046312007\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.4010593220338983\n", + "T-Statistic: -4.205211154248234\n", + "P-Value: 0.024551320079546985\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1423\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1424\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.65\n", + "Average of Other Ratios: 0.4394808743169399\n", + "T-Statistic: -6.100988988496524\n", + "P-Value: 0.008846749505079436\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.4175141242937853\n", + "T-Statistic: -2.3260164970888964\n", + "P-Value: 0.10251957254301898\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1424\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1425\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6833333333333333\n", + "Average of Other Ratios: 0.4354508196721312\n", + "T-Statistic: -8.022565688606122\n", + "P-Value: 0.004043498406787921\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.3879237288135593\n", + "T-Statistic: -2.9208912467506574\n", + "P-Value: 0.06145176525841497\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1425\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1426\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.639344262295082\n", + "Average of Other Ratios: 0.45833333333333337\n", + "T-Statistic: -7.5244830164877445\n", + "P-Value: 0.004865119180322468\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.41101694915254233\n", + "T-Statistic: -2.231327750334267\n", + "P-Value: 0.11185847276008758\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1426\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1427\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
Condensed notebook output for documents 1427–1447. The same Transformers warning is emitted once per model load throughout the run and is reproduced a single time here instead of being repeated for every document:

"Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight'] - This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model). - This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."

Per-document results (two tests per document; ratios rounded to 4 decimal places, t-statistics and p-values to 4 significant figures). "Verdict" mirrors the printed message "The highest ratio is (not) significantly different from the others."

Doc   Test  Highest ratio  Avg. others  t-stat   p-value    Verdict
1427  1     0.5333         0.3650       -11.97   0.001255   significant
1427  2     0.3729         0.2995       -2.210   0.1141     not significant
1428  1     0.6885         0.3292       -6.251   0.008259   significant
1428  2     0.3559         0.2977       -3.953   0.05845    not significant
1429  1     0.5167         0.3818       -9.848   0.002226   significant
1429  2     0.4000         0.3432       -3.022   0.05669    not significant
1430  1     0.5333         0.4071       -3.257   0.04725    significant
1430  2     0.4068         0.3080       -34.12   5.534e-05  significant
1431  1     0.5333         0.4478       -2.777   0.06915    not significant
1431  2     0.4667         0.4110       -6.859   0.006346   significant
1432  1     0.6393         0.3875       -4.974   0.01561    significant
1432  2     0.4068         0.3380       -1.865   0.1591     not significant
1433  1     0.5000         0.4035       -5.908   0.02747    significant
1433  2     0.4407         0.3989       -2.846   0.1045     not significant
1434  1     0.6333         0.4193       -6.342   0.007930   significant
1434  2     0.5254         0.3629       -10.81   0.001694   significant
1435  1     0.5333         0.4230       -4.810   0.01712    significant
1435  2     0.4237         0.3292       -5.281   0.01324    significant
1436  1     0.4333         0.3479       -5.863   0.02788    significant
1436  2     0.4068         0.2951       -2.556   0.08350    not significant
1437  1     0.5902         0.4417       -6.173   0.008559   significant
1437  2     0.4576         0.3713       -12.17   0.001195   significant
1438  1     0.6066         0.4250       -2.836   0.06584    not significant
1438  2     0.4746         0.3586       -3.830   0.03137    significant
1439  1     0.5667         0.4061       -3.337   0.04447    significant
1439  2     0.4576         0.3669       -5.570   0.01142    significant
1440  1     0.7377         0.3125       -11.16   0.001543   significant
1440  2     0.3559         0.3082       -2.607   0.07986    not significant
1441  1     0.5410         0.4000       -2.361   0.09928    not significant
1441  2     0.4833         0.3814       -4.548   0.01991    significant
1442  1     0.4833         0.3941       -7.029   0.005915   significant
1442  2     0.3559         0.2912       -3.602   0.03670    significant
1443  1     0.5833         0.4853       -3.218   0.04864    significant
1443  2     0.4915         0.3882       -7.415   0.005075   significant
1444  1     0.4000         0.3277       -7.531   0.004853   significant
1444  2     0.3729         0.2789       -2.928   0.06109    not significant
1445  1     0.5500         0.4402       -2.235   0.1115     not significant
1445  2     0.4746         0.3503       -6.487   0.007435   significant
1446  1     0.7705         0.3833       -9.354   0.002587   significant
1446  2     0.4915         0.4302       -2.687   0.07461    not significant
1447  1     0.4667         0.3527       -2.771   0.06949    not significant
1447  2     0.4333         0.3220       -2.889   0.06307    not significant

Doing 1448 …
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.3916666666666667\n", + "T-Statistic: -10.246669168835966\n", + "P-Value: 0.0019816576756549577\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.3163841807909605\n", + "T-Statistic: -13.063945294843638\n", + "P-Value: 0.0009686387721898685\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1448\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1449\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.3605191256830601\n", + "T-Statistic: -5.027382355038384\n", + "P-Value: 0.015163608282268311\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.2754237288135593\n", + "T-Statistic: -11.666666666666663\n", + "P-Value: 0.007266951354550622\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1449\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1450\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.4354508196721312\n", + "T-Statistic: -2.2311148149182007\n", + "P-Value: 0.11188060441585682\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.41779661016949154\n", + "T-Statistic: -2.9499240992842286\n", + "P-Value: 0.060028321215766844\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1450\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1451\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.385655737704918\n", + "T-Statistic: -7.0347669547746365\n", + "P-Value: 0.005901980635791814\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4\n", + "Average of Other Ratios: 0.364406779661017\n", + "T-Statistic: -2.193378465041791\n", + "P-Value: 0.1158873271135981\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1451\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1452\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4918032786885246\n", + "Average of Other Ratios: 0.38749999999999996\n", + "T-Statistic: -2.929866094565433\n", + "P-Value: 0.06100726967849578\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.364406779661017\n", + "T-Statistic: -3.2485009805585183\n", + "P-Value: 0.04754349406047413\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1452\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1453\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7213114754098361\n", + "Average of Other Ratios: 0.35\n", + "T-Statistic: -20.62605763263766\n", + "P-Value: 0.0002492064346505477\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3038841807909604\n", + "T-Statistic: -7.310266791966458\n", + "P-Value: 0.0052864231861055435\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1453\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1454\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5409836065573771\n", + "Average of Other Ratios: 0.375\n", + "T-Statistic: -3.46728445878972\n", + "P-Value: 0.040425874782353266\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.303954802259887\n", + "T-Statistic: -5.811865258054232\n", + "P-Value: 0.010140767780751686\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1454\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1455\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7049180327868853\n", + "Average of Other Ratios: 0.3833333333333333\n", + "T-Statistic: -5.862266729790863\n", + "P-Value: 0.00989811735639771\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.3931261770244821\n", + "T-Statistic: -2.400829779247631\n", + "P-Value: 0.1383734677102449\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1455\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1456\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5409836065573771\n", + "Average of Other Ratios: 0.3875\n", + "T-Statistic: -4.478018111973132\n", + "P-Value: 0.02076175387541808\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.3542372881355932\n", + "T-Statistic: -2.708482575649219\n", + "P-Value: 0.0732583731798295\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1456\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1457\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.48333333333333334\n", + "Average of Other Ratios: 0.40225409836065573\n", + "T-Statistic: -3.1045263606689577\n", + "P-Value: 0.05310763670073003\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.3334039548022599\n", + "T-Statistic: -3.7816368562219433\n", + "P-Value: 0.0324072409590771\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1457\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1458\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.39371584699453555\n", + "T-Statistic: -2.5092989392309506\n", + "P-Value: 0.08699122587945884\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.3794491525423729\n", + "T-Statistic: -2.2802263709416946\n", + "P-Value: 0.10691298923861156\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1458\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1459\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6885245901639344\n", + "Average of Other Ratios: 0.3458333333333333\n", + "T-Statistic: -7.3958473467775\n", + "P-Value: 0.005112549560971972\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.35416666666666663\n", + "T-Statistic: -2.7464616338694436\n", + "P-Value: 0.07095246240968993\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1459\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1460\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45901639344262296\n", + "Average of Other Ratios: 0.39999999999999997\n", + "T-Statistic: -2.7428341074780405\n", + "P-Value: 0.07116877964790087\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.43333333333333335\n", + "Average of Other Ratios: 0.3728813559322034\n", + "T-Statistic: -2.118915741970083\n", + "P-Value: 0.1243102693621582\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1460\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1461\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
[Condensed notebook execution log for samples 1461–1481. Before every run the raw cell output repeats the standard transformers notice that the pooler and seq_relationship weights of the bert-base-uncased checkpoint are not used when initializing BertForMaskedLM, which is expected when a pretraining checkpoint is loaded for masked-LM use; only the per-run statistics are reproduced below. Each sample is evaluated twice, and the verdict column reflects the notebook's own message about whether the highest match ratio differs significantly from the others (p < 0.05). The log then continues with sample 1482.]

Sample  Run  Highest Match Ratio  Avg. of Other Ratios  T-Statistic  P-Value  Significant
1461    1    0.6066               0.4083                -3.745       0.0332   yes
1461    2    0.4237               0.3715                -2.387       0.0970   no
1462    1    0.5500               0.3816                -5.869       0.0099   yes
1462    2    0.4576               0.3503                -8.102       0.0039   yes
1463    1    0.5410               0.3500                -2.462       0.0907   no
1463    2    0.3898               0.3374                -3.645       0.0356   yes
1464    1    0.5246               0.3667                -4.840       0.0168   yes
1464    2    0.4915               0.2952                -9.615       0.0024   yes
1465    1    0.5167               0.3816                -4.355       0.0224   yes
1465    2    0.4068               0.3121                -4.942       0.0159   yes
1466    1    0.4754               0.4000                -3.694       0.0344   yes
1466    2    0.4746               0.3590                -3.497       0.0396   yes
1467    1    0.6230               0.3750                -8.049       0.0040   yes
1467    2    0.4667               0.3686                -5.216       0.0137   yes
1468    1    0.5246               0.4167                -3.965       0.0287   yes
1468    2    0.4333               0.3814                -10.623      0.0018   yes
1469    1    0.5410               0.3542                -5.939       0.0095   yes
1469    2    0.3729               0.2919                -2.842       0.1047   no
1470    1    0.6557               0.4000                -3.984       0.0283   yes
1470    2    0.4068               0.3368                -2.544       0.1260   no
1471    1    0.6333               0.3529                -12.365      0.0011   yes
1471    2    0.4237               0.3457                -3.166       0.0506   no
1472    1    0.5082               0.3708                -7.996       0.0041   yes
1472    2    0.4237               0.2977                -5.144       0.0358   yes
1473    1    0.6230               0.5458                -3.519       0.0390   yes
1473    2    0.6000               0.4958                -4.920       0.0161   yes
1474    1    0.6167               0.3894                -4.406       0.0217   yes
1474    2    0.4746               0.3629                -2.911       0.0619   no
1475    1    0.5410               0.3625                -3.772       0.0326   yes
1475    2    0.4068               0.2872                -4.637       0.0189   yes
1476    1    0.5000               0.3607                -4.620       0.0191   yes
1476    2    0.3898               0.3078                -3.416       0.0420   yes
1477    1    0.5500               0.4148                -5.370       0.0126   yes
1477    2    0.4576               0.3417                -6.968       0.0061   yes
1478    1    0.6833               0.4059                -4.852       0.0167   yes
1478    2    0.4068               0.3250                -2.328       0.1024   no
1479    1    0.5667               0.4314                -5.700       0.0107   yes
1479    2    0.4237               0.3820                -2.839       0.1049   no
1480    1    0.5500               0.4564                -8.973       0.0029   yes
1480    2    0.4915               0.3797                -6.405       0.0077   yes
1481    1    0.7705               0.3208                -14.294      0.0007   yes
1481    2    0.3898               0.2910                -3.082       0.0541   no
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7213114754098361\n", + "Average of Other Ratios: 0.4166666666666667\n", + "T-Statistic: -9.335911788157421\n", + "P-Value: 0.002602239684135932\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.40254237288135597\n", + "T-Statistic: -5.266283362867523\n", + "P-Value: 0.013343577598251742\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1482\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1483\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.3610655737704918\n", + "T-Statistic: -3.939648677119553\n", + "P-Value: 0.029143186739241125\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.3587570621468927\n", + "T-Statistic: -3.544745038970269\n", + "P-Value: 0.038233853222009985\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1483\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1484\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.4394808743169399\n", + "T-Statistic: -2.7199087833206432\n", + "P-Value: 0.07255497251636857\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.3248587570621469\n", + "T-Statistic: -2.2831482556870473\n", + "P-Value: 0.10662592307782301\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1484\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1485\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.32766393442622954\n", + "T-Statistic: -7.616943405099432\n", + "P-Value: 0.004696954329420175\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.36666666666666664\n", + "Average of Other Ratios: 0.2923728813559322\n", + "T-Statistic: -4.252457958970371\n", + "P-Value: 0.023834502002819766\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1485\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1486\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.41454918032786886\n", + "T-Statistic: -4.203244682830465\n", + "P-Value: 0.024581754588030094\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.40084745762711865\n", + "T-Statistic: -3.941176470588236\n", + "P-Value: 0.029113738898400077\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1486\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1487\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.36666666666666664\n", + "T-Statistic: -8.088416484282563\n", + "P-Value: 0.003948972398783288\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.32090395480225986\n", + "T-Statistic: -5.31174648930179\n", + "P-Value: 0.013030041853782981\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1487\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1488\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5666666666666667\n", + "Average of Other Ratios: 0.35601092896174863\n", + "T-Statistic: -2.719036894075117\n", + "P-Value: 0.07260835021632396\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3728813559322034\n", + "Average of Other Ratios: 0.3204802259887006\n", + "T-Statistic: -2.1906690113295393\n", + "P-Value: 0.11618158740373133\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1488\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1489\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.65\n", + "Average of Other Ratios: 0.3983606557377049\n", + "T-Statistic: -10.650424052370854\n", + "P-Value: 0.001769113584875861\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.325\n", + "T-Statistic: -3.9643008779307634\n", + "P-Value: 0.028672657850254017\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1489\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1490\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.43558743169398906\n", + "T-Statistic: -2.9888817184112546\n", + "P-Value: 0.058182556819309464\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.3544491525423729\n", + "T-Statistic: -7.463121733660972\n", + "P-Value: 0.00498112898453179\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1490\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1491\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.3937841530054645\n", + "T-Statistic: -4.000584919681885\n", + "P-Value: 0.027997738989114628\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.3686440677966102\n", + "T-Statistic: -4.69122943243946\n", + "P-Value: 0.018313350909407743\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1491\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1492\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.639344262295082\n", + "Average of Other Ratios: 0.3958333333333333\n", + "T-Statistic: -5.43407451169032\n", + "P-Value: 0.012232865280609554\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.3626412429378531\n", + "T-Statistic: -2.5444250028895907\n", + "P-Value: 0.08435259068095222\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1492\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1493\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7213114754098361\n", + "Average of Other Ratios: 0.5166666666666667\n", + "T-Statistic: -5.7882292798767665\n", + "P-Value: 0.010257246511227655\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.576271186440678\n", + "Average of Other Ratios: 0.4848870056497175\n", + "T-Statistic: -2.3523540362420694\n", + "P-Value: 0.10009165403065308\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1493\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1494\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.3567622950819672\n", + "T-Statistic: -20.143694223280193\n", + "P-Value: 0.00026743218087057065\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3559322033898305\n", + "Average of Other Ratios: 0.303954802259887\n", + "T-Statistic: -3.517707919348613\n", + "P-Value: 0.038981350469306286\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1494\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1495\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
+    "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
+   ]
+  },

[Per-sample notebook output for samples 1495-1515, condensed. Each sample prints the same pair of Hugging Face warnings about unused bert-base-uncased pooler and seq-relationship weights when loading BertForMaskedLM, followed by two runs of the match-ratio significance test. The recovered results, rounded to four decimal places, are:

Sample | Run | Highest Match Ratio | Avg. of Other Ratios | T-Statistic | P-Value | Significant (p < 0.05)
1495   |  1  | 0.4833 | 0.3689 |  -2.2636 | 0.1086 | no
1495   |  2  | 0.4576 | 0.3038 |  -4.1984 | 0.0247 | yes
1496   |  1  | 0.6230 | 0.3792 |  -5.2613 | 0.0134 | yes
1496   |  2  | 0.4068 | 0.3292 |  -5.0198 | 0.0152 | yes
1497   |  1  | 0.5833 | 0.4193 |  -4.9980 | 0.0154 | yes
1497   |  2  | 0.4237 | 0.3376 |  -1.9556 | 0.1897 | no
1498   |  1  | 0.6000 | 0.4186 |  -4.2005 | 0.0246 | yes
1498   |  2  | 0.5085 | 0.4049 |  -5.0686 | 0.0148 | yes
1499   |  1  | 0.6000 | 0.3777 |  -3.6748 | 0.0349 | yes
1499   |  2  | 0.3898 | 0.3426 |  -2.4888 | 0.1306 | no
1500   |  1  | 0.6885 | 0.3917 | -27.5934 | 0.0001 | yes
1500   |  2  | 0.4237 | 0.3715 |  -3.9220 | 0.0295 | yes
1501   |  1  | 0.6230 | 0.4167 |  -5.1994 | 0.0138 | yes
1501   |  2  | 0.4915 | 0.3504 |  -3.3556 | 0.0439 | yes
1502   |  1  | 0.6333 | 0.4643 |  -4.3182 | 0.0229 | yes
1502   |  2  | 0.5254 | 0.3674 |  -5.4549 | 0.0121 | yes
1503   |  1  | 0.4833 | 0.3238 |  -4.8925 | 0.0163 | yes
1503   |  2  | 0.2881 | 0.2363 |  -7.4160 | 0.0051 | yes
1504   |  1  | 0.5246 | 0.3917 |  -3.8686 | 0.0306 | yes
1504   |  2  | 0.3729 | 0.3207 |  -4.3276 | 0.0227 | yes
1505   |  1  | 0.5667 | 0.3777 |  -3.0183 | 0.0568 | no
1505   |  2  | 0.3559 | 0.2996 |  -7.0523 | 0.0059 | yes
1506   |  1  | 0.5667 | 0.4062 |  -3.3587 | 0.0438 | yes
1506   |  2  | 0.4915 | 0.3422 |  -2.4145 | 0.0946 | no
1507   |  1  | 0.5574 | 0.4333 |  -6.8905 | 0.0063 | yes
1507   |  2  | 0.4746 | 0.3546 |  -6.2609 | 0.0082 | yes
1508   |  1  | 0.6066 | 0.4208 |  -8.9148 | 0.0030 | yes
1508   |  2  | 0.4746 | 0.3882 |  -4.6994 | 0.0182 | yes
1509   |  1  | 0.5246 | 0.3417 |  -7.6040 | 0.0047 | yes
1509   |  2  | 0.4407 | 0.3162 |  -3.3844 | 0.0430 | yes
1510   |  1  | 0.4500 | 0.3692 |  -1.9864 | 0.1412 | no
1510   |  2  | 0.4407 | 0.3124 |  -8.3309 | 0.0036 | yes
1511   |  1  | 0.5500 | 0.3607 |  -4.9461 | 0.0159 | yes
1511   |  2  | 0.4407 | 0.2870 | -14.6653 | 0.0007 | yes
1512   |  1  | 0.5738 | 0.3667 |  -4.5887 | 0.0194 | yes
1512   |  2  | 0.3898 | 0.3416 |  -3.0065 | 0.0574 | no
1513   |  1  | 0.6885 | 0.3375 | -12.7489 | 0.0010 | yes
1513   |  2  | 0.3898 | 0.3119 |  -2.8294 | 0.0662 | no
1514   |  1  | 0.7377 | 0.3250 | -12.2542 | 0.0012 | yes
1514   |  2  | 0.4167 | 0.3602 |  -3.2338 | 0.0481 | yes
1515   |  1  | 0.6667 | 0.3778 |  -4.7973 | 0.0172 | yes
1515   |  2  | 0.4667 | 0.3559 |  -4.4385 | 0.0213 | yes

The raw log resumes at sample 1516 below.]
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6666666666666666\n", + "Average of Other Ratios: 0.3778005464480874\n", + "T-Statistic: -4.797285861708767\n", + "P-Value: 0.017234618038032837\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.35593220338983056\n", + "T-Statistic: -4.438525973846813\n", + "P-Value: 0.021261047550038326\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1515\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1516\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7666666666666667\n", + "Average of Other Ratios: 0.34405737704918027\n", + "T-Statistic: -14.172745497720056\n", + "P-Value: 0.0007609909456212724\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.43333333333333335\n", + "Average of Other Ratios: 0.3432203389830508\n", + "T-Statistic: -2.8168388126916613\n", + "P-Value: 0.06691301311594693\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1516\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1517\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6557377049180327\n", + "Average of Other Ratios: 0.37083333333333335\n", + "T-Statistic: -5.3888664006939155\n", + "P-Value: 0.012519846650421048\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.3334745762711865\n", + "T-Statistic: -3.3752877168731676\n", + "P-Value: 0.043241533194103954\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1517\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1518\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6721311475409836\n", + "Average of Other Ratios: 0.4041666666666667\n", + "T-Statistic: -8.518263011092074\n", + "P-Value: 0.003398441269028304\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.4176553672316384\n", + "T-Statistic: -2.608655045302455\n", + "P-Value: 0.07977746445984121\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1518\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1519\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6721311475409836\n", + "Average of Other Ratios: 0.3458333333333333\n", + "T-Statistic: -9.713343067748339\n", + "P-Value: 0.002317591752507357\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3330508474576271\n", + "T-Statistic: -3.8022134355217396\n", + "P-Value: 0.031956488835427684\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1519\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1520\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6229508196721312\n", + "Average of Other Ratios: 0.30833333333333335\n", + "T-Statistic: -6.506748841466283\n", + "P-Value: 0.007372760445320987\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3728813559322034\n", + "Average of Other Ratios: 0.26581920903954803\n", + "T-Statistic: -6.147383047873269\n", + "P-Value: 0.008659663805844972\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1520\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1521\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5245901639344263\n", + "Average of Other Ratios: 0.42083333333333334\n", + "T-Statistic: -2.8626986790948408\n", + "P-Value: 0.06443431083077424\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.3882062146892655\n", + "T-Statistic: -7.465956735313377\n", + "P-Value: 0.004975689184389457\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1521\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1522\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.4065573770491804\n", + "T-Statistic: -6.594241817524086\n", + "P-Value: 0.007098081515958769\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.43333333333333335\n", + "Average of Other Ratios: 0.3728813559322034\n", + "T-Statistic: -2.4230728530694363\n", + "P-Value: 0.09391016538665153\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1522\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1523\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5666666666666667\n", + "Average of Other Ratios: 0.34419398907103826\n", + "T-Statistic: -9.378588970336171\n", + "P-Value: 0.0025678094646998427\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3559322033898305\n", + "Average of Other Ratios: 0.31207627118644066\n", + "T-Statistic: -2.1523998630107477\n", + "P-Value: 0.12043504718327508\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1523\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1524\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.3485655737704918\n", + "T-Statistic: -3.9566860100053627\n", + "P-Value: 0.028816950912038987\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.30856873822975517\n", + "T-Statistic: -2.102946127279898\n", + "P-Value: 0.1701873208222798\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1524\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1525\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.3726775956284153\n", + "T-Statistic: -3.463270042711585\n", + "P-Value: 0.040543787476660896\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3728813559322034\n", + "Average of Other Ratios: 0.2913135593220339\n", + "T-Statistic: -4.4679931695295805\n", + "P-Value: 0.020887046587591118\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1525\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1526\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.43586065573770494\n", + "T-Statistic: -4.117687550148067\n", + "P-Value: 0.02595439887765405\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.48333333333333334\n", + "Average of Other Ratios: 0.3686440677966102\n", + "T-Statistic: -5.413333333333333\n", + "P-Value: 0.012363451013336186\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1526\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1527\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.42083333333333334\n", + "T-Statistic: -2.7244712370027178\n", + "P-Value: 0.07227644833071428\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.3516949152542373\n", + "T-Statistic: -3.2276227144907716\n", + "P-Value: 0.048303103292659674\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1527\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1528\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.65\n", + "Average of Other Ratios: 0.37315573770491806\n", + "T-Statistic: -7.357526587712183\n", + "P-Value: 0.005189458410858861\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3209745762711864\n", + "T-Statistic: -3.5295270186824976\n", + "P-Value: 0.038652315352543654\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1528\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1529\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
Per-text detection output, texts 1529–1550 (two detection runs per text). Each run reloads bert-base-uncased into BertForMaskedLM, which prints the standard Transformers notice that the pooler and seq_relationship (next-sentence-prediction) weights of the pretraining checkpoint are not used by a masked-LM head; as the notice itself says, this is expected.

Text  Run  Highest Match Ratio  Average of Other Ratios  T-Statistic  P-Value  Significant (p < 0.05)
1529  1    0.8197               0.3208                   -12.690      0.0011   yes
1529  2    0.3898               0.3119                   -2.259       0.1090   no
1530  1    0.5167               0.3372                   -4.515       0.0457   yes
1530  2    0.4167               0.3263                   -5.635       0.0111   yes
1531  1    0.5574               0.3417                   -3.211       0.0489   yes
1531  2    0.4068               0.2871                   -5.606       0.0112   yes
1532  1    0.5246               0.3875                   -2.708       0.0733   no
1532  2    0.5424               0.3503                   -4.933       0.0160   yes
1533  1    0.5333               0.3940                   -3.678       0.0348   yes
1533  2    0.4237               0.3292                   -1.932       0.1489   no
1534  1    0.5667               0.4895                   -2.351       0.1003   no
1534  2    0.5254               0.4470                   -2.370       0.0985   no
1535  1    0.5333               0.4025                   -3.272       0.0467   yes
1535  2    0.4576               0.3334                   -2.965       0.0593   no
1536  1    0.5833               0.3644                   -3.616       0.0363   yes
1536  2    0.4000               0.3347                   -3.080       0.0541   no
1537  1    0.6667               0.4439                   -17.785      0.0004   yes
1537  2    0.4237               0.4044                   -2.463       0.1327   no
1538  1    0.6393               0.3333                   -5.159       0.0141   yes
1538  2    0.4000               0.3263                   -2.810       0.0673   no
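The repeated warning in the log comes from loading the bert-base-uncased pretraining checkpoint into a BertForMaskedLM head. The sketch below shows a minimal load of that kind together with one plausible way to turn masked-token predictions into a per-candidate match ratio; the helper name top1_match_ratio and the agreement-based scoring are illustrative assumptions, since the notebook's exact scoring code is not shown in this excerpt.

```python
# Minimal sketch, assuming the match ratio is top-1 agreement between BERT's
# masked-slot predictions and a candidate token sequence (illustrative only).
import torch
from transformers import BertTokenizer, BertForMaskedLM

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
# Loading the pretraining checkpoint into a masked-LM head drops the pooler and
# seq_relationship weights, which is exactly what the warning in the log reports.
model = BertForMaskedLM.from_pretrained("bert-base-uncased")
model.eval()

def top1_match_ratio(text, candidate_tokens):
    """Fraction of positions where BERT's top-1 prediction for a masked slot
    equals the corresponding token of the candidate sequence (hypothetical metric)."""
    input_ids = tokenizer(text, return_tensors="pt", truncation=True)["input_ids"][0]
    n = min(input_ids.size(0) - 2, len(candidate_tokens))  # ignore [CLS] and [SEP]
    hits = 0
    for pos in range(1, 1 + n):
        masked = input_ids.clone()
        masked[pos] = tokenizer.mask_token_id
        with torch.no_grad():
            logits = model(input_ids=masked.unsqueeze(0)).logits
        pred = tokenizer.convert_ids_to_tokens(int(logits[0, pos].argmax()))
        hits += int(pred == candidate_tokens[pos - 1])
    return hits / n if n else 0.0
```

Scoring one text against several candidate keys yields one such ratio per candidate; the largest would correspond to the "Highest Match Ratio" in the log and the remaining ones to the "Average of Other Ratios".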
Texts 1539–1550 (continued):

Text  Run  Highest Match Ratio  Average of Other Ratios  T-Statistic  P-Value  Significant (p < 0.05)
1539  1    0.6167               0.4976                   -3.888       0.0302   yes
1539  2    0.5167               0.4195                   -2.468       0.0902   no
1540  1    0.6333               0.4563                   -9.591       0.0024   yes
1540  2    0.5667               0.4237                   -4.404       0.0217   yes
1541  1    0.6230               0.4083                   -3.897       0.0300   yes
1541  2    0.5254               0.4009                   -8.284       0.0037   yes
1542  1    0.6230               0.4000                   -6.306       0.0081   yes
1542  2    0.4407               0.3588                   -3.775       0.0325   yes
1543  1    0.5167               0.4063                   -2.096       0.1270   no
1543  2    0.3898               0.3585                   -1.739       0.1804   no
1544  1    0.5246               0.4042                   -2.156       0.1200   no
1544  2    0.4237               0.3502                   -3.025       0.0565   no
1545  1    0.5333               0.4230                   -4.137       0.0256   yes
1545  2    0.4576               0.3923                   -2.243       0.1106   no
1546  1    0.5833               0.4269                   -3.084       0.0540   no
1546  2    0.4915               0.3715                   -5.880       0.0098   yes
1547  1    0.5574               0.3583                   -4.842       0.0168   yes
1547  2    0.4746               0.3338                   -3.263       0.0470   yes
1548  1    0.6000               0.4024                   -6.904       0.0062   yes
1548  2    0.4500               0.3517                   -12.116      0.0012   yes
1549  1    0.6885               0.3708                   -22.323      0.0002   yes
1549  2    0.4746               0.3628                   -8.098       0.0039   yes
1550  -    run started; output continues below
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.48333333333333334\n", + "Average of Other Ratios: 0.32586520947176684\n", + "T-Statistic: -3.2146877218511154\n", + "P-Value: 0.08465882990859916\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.32062146892655363\n", + "T-Statistic: -9.054535625012225\n", + "P-Value: 0.002845269537083193\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1550\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1551\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.65\n", + "Average of Other Ratios: 0.38995901639344266\n", + "T-Statistic: -19.23389711083783\n", + "P-Value: 0.0003069437053761336\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.3686440677966102\n", + "T-Statistic: -6.772746506262815\n", + "P-Value: 0.006578095712086025\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1551\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1552\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.44815573770491807\n", + "T-Statistic: -12.270797400644883\n", + "P-Value: 0.0011656421347428028\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.40925141242937857\n", + "T-Statistic: -4.746377851626824\n", + "P-Value: 0.01774180468795205\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1552\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1553\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.37083333333333335\n", + "T-Statistic: -3.3590074850087963\n", + "P-Value: 0.043765495024786334\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.3305084745762712\n", + "T-Statistic: -5.565656594987991\n", + "P-Value: 0.011445072212549055\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1553\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1554\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6229508196721312\n", + "Average of Other Ratios: 0.3208333333333333\n", + "T-Statistic: -8.192450020709705\n", + "P-Value: 0.003805499272772318\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.38333333333333336\n", + "Average of Other Ratios: 0.3220338983050847\n", + "T-Statistic: -2.557369525291349\n", + "P-Value: 0.08340507155260364\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1554\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1555\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.5062841530054645\n", + "T-Statistic: -5.14902114352389\n", + "P-Value: 0.01419903025134191\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.559322033898305\n", + "Average of Other Ratios: 0.40557909604519776\n", + "T-Statistic: -3.3491158311330995\n", + "P-Value: 0.044087787022349285\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1555\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1556\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.38545081967213113\n", + "T-Statistic: -6.522157996146436\n", + "P-Value: 0.007323382596367344\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.34576271186440677\n", + "T-Statistic: -6.516946235415337\n", + "P-Value: 0.007340034612930771\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1556\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1557\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5245901639344263\n", + "Average of Other Ratios: 0.3625\n", + "T-Statistic: -5.036188941644216\n", + "P-Value: 0.015090956088189262\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.28255649717514125\n", + "T-Statistic: -2.8447438754418775\n", + "P-Value: 0.06539088569746056\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1557\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1558\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5573770491803278\n", + "Average of Other Ratios: 0.38333333333333336\n", + "T-Statistic: -4.5941415869804265\n", + "P-Value: 0.019378995099920374\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3728813559322034\n", + "Average of Other Ratios: 0.3204802259887006\n", + "T-Statistic: -2.685797986127019\n", + "P-Value: 0.0746800724946613\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1558\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1559\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.46045081967213114\n", + "T-Statistic: -2.451392373266136\n", + "P-Value: 0.09156587040440396\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.3375706214689266\n", + "T-Statistic: -3.7567808109943908\n", + "P-Value: 0.03296266830646462\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1559\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1560\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5409836065573771\n", + "Average of Other Ratios: 0.4041666666666667\n", + "T-Statistic: -3.3807925592909616\n", + "P-Value: 0.04306616942393178\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.3309792843691149\n", + "T-Statistic: -1.7858245155746162\n", + "P-Value: 0.2160472917948211\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1560\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1561\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5409836065573771\n", + "Average of Other Ratios: 0.49583333333333335\n", + "T-Statistic: -2.6281319368691185\n", + "P-Value: 0.07845092410121779\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.4471045197740113\n", + "T-Statistic: -3.0677087606277684\n", + "P-Value: 0.054660670246442895\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1561\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1562\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7166666666666667\n", + "Average of Other Ratios: 0.38551912568306007\n", + "T-Statistic: -7.359999679334292\n", + "P-Value: 0.0051844490931802715\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.35918079096045197\n", + "T-Statistic: -2.275676500715352\n", + "P-Value: 0.10736186186928126\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1562\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1563\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7166666666666667\n", + "Average of Other Ratios: 0.31905737704918036\n", + "T-Statistic: -9.174911614798257\n", + "P-Value: 0.002737770634718071\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.29936440677966103\n", + "T-Statistic: -9.423964347365773\n", + "P-Value: 0.0025318610514215717\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1563\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1564\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.4083333333333333\n", + "T-Statistic: -3.0623788546160213\n", + "P-Value: 0.054890208233557496\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.35847457627118645\n", + "T-Statistic: -3.2665066947432746\n", + "P-Value: 0.04690049922235536\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1564\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1565\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.43041894353369764\n", + "T-Statistic: -1.6922369290882864\n", + "P-Value: 0.23267507692719033\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.3969632768361582\n", + "T-Statistic: -2.896194824692244\n", + "P-Value: 0.06269598939057865\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1565\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1566\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6666666666666666\n", + "Average of Other Ratios: 0.36489071038251364\n", + "T-Statistic: -7.458228376445201\n", + "P-Value: 0.004990536830131611\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.35\n", + "Average of Other Ratios: 0.3177966101694915\n", + "T-Statistic: -7.600000000000018\n", + "P-Value: 0.004727189949278468\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1566\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1567\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.39842896174863385\n", + "T-Statistic: -14.529811045035604\n", + "P-Value: 0.000706861443434645\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4\n", + "Average of Other Ratios: 0.364406779661017\n", + "T-Statistic: -1.7643529058752923\n", + "P-Value: 0.17586309782892548\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1567\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1568\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5666666666666667\n", + "Average of Other Ratios: 0.3861338797814208\n", + "T-Statistic: -3.095250278326531\n", + "P-Value: 0.05349362654863495\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.34152542372881356\n", + "T-Statistic: -2.7297642578116172\n", + "P-Value: 0.07195498560460242\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1568\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1569\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6885245901639344\n", + "Average of Other Ratios: 0.3375\n", + "T-Statistic: -8.553875315773226\n", + "P-Value: 0.003357503807386855\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.3419491525423729\n", + "T-Statistic: -2.59993484464519\n", + "P-Value: 0.08038034833840463\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1569\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1570\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.4355191256830601\n", + "T-Statistic: -7.054729326958077\n", + "P-Value: 0.0058542974670971835\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.38834745762711864\n", + "T-Statistic: -7.704320406910871\n", + "P-Value: 0.00454501569408191\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1570\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1571\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6557377049180327\n", + "Average of Other Ratios: 0.39583333333333326\n", + "T-Statistic: -4.287445285495584\n", + "P-Value: 0.023321028120091257\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.32033898305084746\n", + "T-Statistic: -3.891547782585247\n", + "P-Value: 0.06013719541915213\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1571\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1572\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6333333333333333\n", + "Average of Other Ratios: 0.38989071038251366\n", + "T-Statistic: -11.606555225890991\n", + "P-Value: 0.0013736462375547754\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.3543785310734463\n", + "T-Statistic: -5.771672200284073\n", + "P-Value: 0.010339886184267465\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1572\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1573\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.40211748633879785\n", + "T-Statistic: -3.9527199474578647\n", + "P-Value: 0.028892473194063042\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4\n", + "Average of Other Ratios: 0.3474576271186441\n", + "T-Statistic: -4.802499349297195\n", + "P-Value: 0.017183746707078684\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1573\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1574\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5081967213114754\n", + "Average of Other Ratios: 0.3875\n", + "T-Statistic: -2.3837761964250683\n", + "P-Value: 0.09728573796120057\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.3504237288135593\n", + "T-Statistic: -3.262064802178429\n", + "P-Value: 0.04705809381582309\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1574\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1575\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6557377049180327\n", + "Average of Other Ratios: 0.3958333333333333\n", + "T-Statistic: -5.180131981040665\n", + "P-Value: 0.013965256620241263\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.34611581920903955\n", + "T-Statistic: -2.3420605578207687\n", + "P-Value: 0.10103215816449211\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1575\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1576\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.639344262295082\n", + "Average of Other Ratios: 0.48750000000000004\n", + "T-Statistic: -5.206088992974237\n", + "P-Value: 0.01377403946898648\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.4430790960451977\n", + "T-Statistic: -12.700432204420535\n", + "P-Value: 0.001052946749526643\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1576\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1577\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.3984289617486339\n", + "T-Statistic: -3.252508058107846\n", + "P-Value: 0.047399438327312575\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.36292372881355933\n", + "T-Statistic: -5.327313464522444\n", + "P-Value: 0.012924898405343976\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1577\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1578\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6229508196721312\n", + "Average of Other Ratios: 0.45833333333333326\n", + "T-Statistic: -3.755595353518015\n", + "P-Value: 0.032989461923480595\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.38425141242937855\n", + "T-Statistic: -4.698424427455757\n", + "P-Value: 0.018237439696403804\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1578\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1579\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6721311475409836\n", + "Average of Other Ratios: 0.43333333333333335\n", + "T-Statistic: -5.352084912420385\n", + "P-Value: 0.012759859239265056\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.3882768361581921\n", + "T-Statistic: -6.446590136835602\n", + "P-Value: 0.007569780791386365\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1579\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1580\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).
- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).

Per-document detection results, documents 1580-1599. Each document is processed in two runs, and the same stderr warning about unused bert-base-uncased pooler/seq_relationship weights precedes every BertForMaskedLM load. Columns: highest = Highest Match Ratio, others = Average of Other Ratios, t = T-Statistic, p = P-Value; "significant" reproduces the verdict "The highest ratio is significantly different from the others."

Document 1580, run 1: highest 0.55, others 0.3859289617486339, t -6.327570523296936, p 0.007980431881706069, significant
Document 1580, run 2: highest 0.4915254237288136, others 0.328954802259887, t -7.995948131756914, p 0.004082555066940009, significant
Document 1581, run 1: highest 0.6885245901639344, others 0.3583333333333333, t -8.261956373376224, p 0.0037134622752403278, significant
Document 1581, run 2: highest 0.4067796610169492, others 0.3249293785310734, t -4.059276987931347, p 0.026948683537732478, significant
Document 1582, run 1: highest 0.6065573770491803, others 0.39583333333333337, t -14.806483893669949, p 0.0006683885463905405, significant
Document 1582, run 2: highest 0.4166666666666667, others 0.3601694915254237, t -4.444444444444445, p 0.0211852376832342, significant
Document 1583, run 1: highest 0.5409836065573771, others 0.375, t -6.0055128469177514, p 0.009248776088665693, significant
Document 1583, run 2: highest 0.5166666666666667, others 0.3135593220338983, t -6.483004673258841, p 0.007449708002644034, significant
Document 1584, run 1: highest 0.7333333333333333, others 0.3855874316939891, t -13.52760363997253, p 0.0008736345306870901, significant
Document 1584, run 2: highest 0.4067796610169492, others 0.3586158192090395, t -3.921613238217157, p 0.029493726846163477, significant
Document 1585, run 1: highest 0.5901639344262295, others 0.375, t -5.923439030264504, p 0.009613733901387398, significant
Document 1585, run 2: highest 0.4406779661016949, others 0.3038135593220339, t -9.848354363498629, p 0.0022258218489771527, significant
Document 1586, run 1: highest 0.7704918032786885, others 0.32499999999999996, t -15.651197399435246, p 0.0005668681892135681, significant
Document 1586, run 2: highest 0.3728813559322034, others 0.30819209039548023, t -2.8185816226035203, p 0.06681666397008225, not significant
Document 1587, run 1: highest 0.7704918032786885, others 0.3041666666666667, t -10.064072122074942, p 0.0020889346595399047, significant
Document 1587, run 2: highest 0.423728813559322, others 0.3077683615819209, t -4.319796363784691, p 0.02285889788909435, significant
Document 1588, run 1: highest 0.5409836065573771, others 0.3625, t -4.290763799723821, p 0.02327307093304941, significant
Document 1588, run 2: highest 0.36666666666666664, others 0.3008474576271187, t -3.7673867088977024, p 0.032724190890523516, significant
Document 1589, run 1: highest 0.6721311475409836, others 0.37916666666666665, t -7.686877814143884, p 0.004574822356362937, significant
Document 1589, run 2: highest 0.4406779661016949, others 0.32909604519774016, t -5.006421579143373, p 0.015338372293344877, significant
Document 1590, run 1: highest 0.5166666666666667, others 0.24057377049180326, t -10.793811013450267, p 0.0017009322854444507, significant
Document 1590, run 2: highest 0.26666666666666666, others 0.1822033898305085, t -10.409843826150722, p 0.001891887685589342, significant
Document 1591, run 1: highest 0.55, others 0.40635245901639344, t -3.8261690084495177, p 0.03144179376080752, significant
Document 1591, run 2: highest 0.3728813559322034, others 0.32895480225988705, t -2.949093002418226, p 0.060068491192589654, not significant
Document 1592, run 1: highest 0.55, others 0.4396857923497268, t -6.979319263082819, p 0.006037138823969032, significant
Document 1592, run 2: highest 0.4576271186440678, others 0.4213747645951036, t -3.60190807688241, p 0.06917625132966015, not significant
Document 1593, run 1: highest 0.639344262295082, others 0.35833333333333334, t -5.646420206490955, p 0.010994199496502145, significant
Document 1593, run 2: highest 0.4067796610169492, others 0.3712570621468927, t -2.1556241033270016, p 0.12006957075894573, not significant
Document 1594, run 1: highest 0.5, others 0.35724043715847, t -2.2370139019506667, p 0.11126942224811241, not significant
Document 1594, run 2: highest 0.3728813559322034, others 0.3081214689265537, t -3.8998685439923104, p 0.029923600161968773, significant
Document 1595, run 1: highest 0.5666666666666667, others 0.4478825136612022, t -5.2484139227280515, p 0.01346950981253263, significant
Document 1595, run 2: highest 0.4067796610169492, others 0.3206214689265537, t -3.0461946335774264, p 0.05559466924951487, not significant
Document 1596, run 1: highest 0.5245901639344263, others 0.4541666666666667, t -3.811215023955327, p 0.03176182755813695, significant
Document 1596, run 2: highest 0.5084745762711864, others 0.3803672316384181, t -2.5550917784808758, p 0.08357084637244536, not significant
Document 1597, run 1: highest 0.6166666666666667, others 0.3770491803278689, t -5.292311191396577, p 0.013162886761926772, significant
Document 1597, run 2: highest 0.3898305084745763, others 0.3311676082862524, t -2.2490974729241855, p 0.1534474328108915, not significant
Document 1598, run 1: highest 0.8333333333333334, others 0.4108606557377049, t -28.554907520499913, p 9.430057361661532e-05, significant
Document 1598, run 2: highest 0.4576271186440678, others 0.3293079096045198, t -8.10248857720817, p 0.003929152121721815, significant
Document 1599, run 1: highest 0.5666666666666667, others 0.4064207650273224, t -5.716648520223962, p 0.01062086627428458, significant
Document 1599, run 2: highest 0.36666666666666664, others 0.3432203389830508, t -5.53333333333333, p 0.011632297103592594, significant
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5666666666666667\n", + "Average of Other Ratios: 0.4354508196721312\n", + "T-Statistic: -6.567850831861847\n", + "P-Value: 0.007179495661119332\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.401271186440678\n", + "T-Statistic: -2.459129645217466\n", + "P-Value: 0.09093785730195074\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1600\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1601\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6557377049180327\n", + "Average of Other Ratios: 0.4625\n", + "T-Statistic: -4.525936567821964\n", + "P-Value: 0.02017613595003384\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5423728813559322\n", + "Average of Other Ratios: 0.4557909604519774\n", + "T-Statistic: -3.177872547530921\n", + "P-Value: 0.05017596813649382\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1601\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1602\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5666666666666667\n", + "Average of Other Ratios: 0.2616120218579235\n", + "T-Statistic: -5.789061661601357\n", + "P-Value: 0.010253114813387197\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.23728813559322035\n", + "Average of Other Ratios: 0.19837570621468925\n", + "T-Statistic: -4.553719008264464\n", + "P-Value: 0.01984639027416479\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1602\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1603\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.4208333333333333\n", + "T-Statistic: -4.46482463507427\n", + "P-Value: 0.020926850914564546\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.39279661016949147\n", + "T-Statistic: -2.2752115860184423\n", + "P-Value: 0.10740785666680364\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1603\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1604\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.373292349726776\n", + "T-Statistic: -5.837995555994575\n", + "P-Value: 0.010014006051520334\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.3667372881355932\n", + "T-Statistic: -2.5590817041221827\n", + "P-Value: 0.08328072516772207\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1604\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1605\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6229508196721312\n", + "Average of Other Ratios: 0.32083333333333336\n", + "T-Statistic: -5.409452966923079\n", + "P-Value: 0.012388083734135366\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3728813559322034\n", + "Average of Other Ratios: 0.307909604519774\n", + "T-Statistic: -4.795831523312723\n", + "P-Value: 0.017248843874307026\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1605\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1606\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7213114754098361\n", + "Average of Other Ratios: 0.37083333333333335\n", + "T-Statistic: -13.136517577876024\n", + "P-Value: 0.0009528915088720033\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.39646892655367233\n", + "T-Statistic: -2.0743037811227554\n", + "P-Value: 0.12970694368573776\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1606\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1607\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.38565573770491807\n", + "T-Statistic: -1.9388846252338177\n", + "P-Value: 0.1478731519159202\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3728813559322034\n", + "Average of Other Ratios: 0.29199623352165727\n", + "T-Statistic: -4.218647880820993\n", + "P-Value: 0.05185732044957473\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1607\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1608\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.65\n", + "Average of Other Ratios: 0.33975409836065573\n", + "T-Statistic: -7.109185568026944\n", + "P-Value: 0.005726775618868671\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3728813559322034\n", + "Average of Other Ratios: 0.307909604519774\n", + "T-Statistic: -3.8877095717511785\n", + "P-Value: 0.030167484179746327\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1608\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1609\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.4028005464480875\n", + "T-Statistic: -4.245647176969608\n", + "P-Value: 0.02393615072729287\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.43333333333333335\n", + "Average of Other Ratios: 0.3389830508474576\n", + "T-Statistic: -3.1281966106828594\n", + "P-Value: 0.05213847018410804\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1609\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1610\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.31509562841530053\n", + "T-Statistic: -8.1350398352272\n", + "P-Value: 0.003883805394600376\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.2911016949152542\n", + "T-Statistic: -3.8194407004959707\n", + "P-Value: 0.03158527352615677\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1610\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1611\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.275\n", + "T-Statistic: -8.547594094887065\n", + "P-Value: 0.0033646767112774094\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.36666666666666664\n", + "Average of Other Ratios: 0.2796610169491525\n", + "T-Statistic: -3.422222222222223\n", + "P-Value: 0.041774946157591517\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1611\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1612\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.37739071038251365\n", + "T-Statistic: -4.570466183924919\n", + "P-Value: 0.019650992181636566\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.35\n", + "Average of Other Ratios: 0.3008474576271186\n", + "T-Statistic: -3.396132253860897\n", + "P-Value: 0.042582247398141884\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1612\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1613\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7049180327868853\n", + "Average of Other Ratios: 0.38749999999999996\n", + "T-Statistic: -10.334980525866824\n", + "P-Value: 0.001932389182639011\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.4008474576271187\n", + "T-Statistic: -5.126174517160044\n", + "P-Value: 0.01437396472653316\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1613\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1614\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.65\n", + "Average of Other Ratios: 0.46461748633879785\n", + "T-Statistic: -6.40281942286815\n", + "P-Value: 0.007717499991952441\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.4093926553672317\n", + "T-Statistic: -3.8752447538675496\n", + "P-Value: 0.030420159550251232\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1614\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1615\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5245901639344263\n", + "Average of Other Ratios: 0.39166666666666666\n", + "T-Statistic: -4.3147109239249755\n", + "P-Value: 0.022930753286261416\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4166666666666667\n", + "Average of Other Ratios: 0.3220338983050848\n", + "T-Statistic: -2.456339176607667\n", + "P-Value: 0.0911637428376164\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1615\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1616\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.3690573770491803\n", + "T-Statistic: -3.785362525900684\n", + "P-Value: 0.0323250246243217\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.28721751412429375\n", + "T-Statistic: -3.7810435820759127\n", + "P-Value: 0.03242035778750225\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1616\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1617\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5666666666666667\n", + "Average of Other Ratios: 0.4151639344262295\n", + "T-Statistic: -3.7566753428966173\n", + "P-Value: 0.03296505095792107\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.3291666666666667\n", + "T-Statistic: -4.334673395356118\n", + "P-Value: 0.022650355990863152\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1617\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1618\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5409836065573771\n", + "Average of Other Ratios: 0.35416666666666663\n", + "T-Statistic: -17.816045884784717\n", + "P-Value: 0.0003855963684750506\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.2828389830508475\n", + "T-Statistic: -10.598274792221071\n", + "P-Value: 0.0017948089853278408\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1618\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1619\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.35239071038251363\n", + "T-Statistic: -4.290395689342995\n", + "P-Value: 0.023278384354672565\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4166666666666667\n", + "Average of Other Ratios: 0.3050847457627119\n", + "T-Statistic: -2.1744034361245617\n", + "P-Value: 0.1179670753099225\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1619\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1620\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.8032786885245902\n", + "Average of Other Ratios: 0.4291666666666667\n", + "T-Statistic: -17.0700379678159\n", + "P-Value: 0.00043795305550041073\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6271186440677966\n", + "Average of Other Ratios: 0.4050847457627119\n", + "T-Statistic: -9.240027567747635\n", + "P-Value: 0.0026818554468391116\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1620\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1621\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7213114754098361\n", + "Average of Other Ratios: 0.4\n", + "T-Statistic: -27.264182776241938\n", + "P-Value: 0.00010829140307033679\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3543785310734463\n", + "T-Statistic: -7.613153050047849\n", + "P-Value: 0.004703696031017581\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1621\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1622\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5245901639344263\n", + "Average of Other Ratios: 0.4166666666666667\n", + "T-Statistic: -3.172289994751985\n", + "P-Value: 0.05039180926567533\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.3940677966101695\n", + "T-Statistic: -2.976518270891203\n", + "P-Value: 0.05876051507230344\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1622\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1623\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.3867030965391621\n", + "T-Statistic: -18.78378378378386\n", + "P-Value: 0.0028222292471545055\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.38411016949152543\n", + "T-Statistic: -7.506646894907283\n", + "P-Value: 0.0048984693502791745\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1623\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1624\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5573770491803278\n", + "Average of Other Ratios: 0.35833333333333334\n", + "T-Statistic: -6.460980283821876\n", + "P-Value: 0.007522028995284054\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.39668079096045195\n", + "T-Statistic: -3.674203780886493\n", + "P-Value: 0.034897733800121254\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1624\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1625\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5573770491803278\n", + "Average of Other Ratios: 0.4041666666666667\n", + "T-Statistic: -8.291518233493496\n", + "P-Value: 0.0036752095251250446\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3627824858757062\n", + "T-Statistic: -5.044529690751584\n", + "P-Value: 0.015022564694235147\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1625\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1626\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.38524590163934425\n", + "T-Statistic: -2.953558943086598\n", + "P-Value: 0.059853030783475765\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.32083333333333336\n", + "T-Statistic: -5.361374140743174\n", + "P-Value: 0.012698680109057153\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1626\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1627\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.4278005464480874\n", + "T-Statistic: -3.220215216567121\n", + "P-Value: 0.048576294366246366\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.371045197740113\n", + "T-Statistic: -5.171216742476558\n", + "P-Value: 0.014031731053501115\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1627\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1628\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5245901639344263\n", + "Average of Other Ratios: 0.35416666666666663\n", + "T-Statistic: -7.776108221599537\n", + "P-Value: 0.00442500536007881\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.38333333333333336\n", + "Average of Other Ratios: 0.3347457627118644\n", + "T-Statistic: -3.822222222222226\n", + "P-Value: 0.03152585651673634\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1628\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1629\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6229508196721312\n", + "Average of Other Ratios: 0.3375\n", + "T-Statistic: -6.2978017747795265\n", + "P-Value: 0.00808769678640319\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3389830508474576\n", + "Average of Other Ratios: 0.3038135593220339\n", + "T-Statistic: -27.666666666666476\n", + "P-Value: 0.00010364812386939461\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1629\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1630\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.39392076502732243\n", + "T-Statistic: -2.8228292898095364\n", + "P-Value: 0.06658256000513207\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3425612052730697\n", + "T-Statistic: -7.542807340088425\n", + "P-Value: 0.01712633052558875\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1630\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1631\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + },
[Per-document notebook output for documents 1631-1651 omitted here: for each document the run re-emits the same bert-base-uncased BertForMaskedLM initialization warning and prints two test results (Highest Match Ratio, Average of Other Ratios, T-Statistic, P-Value, and whether the highest ratio is significantly different from the others), separated by "Done N" / "Doing N+1" banners.]
+ { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.3778005464480874\n", + "T-Statistic: -2.8771732821175253\n", + "P-Value: 0.06367584045734935\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.31666666666666665\n", + "T-Statistic: -4.566003911847601\n", + "P-Value: 0.019702811079520054\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1652\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1653\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.39064207650273225\n", + "T-Statistic: -2.5781209852352887\n", + "P-Value: 0.08191326217312117\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4166666666666667\n", + "Average of Other Ratios: 0.364406779661017\n", + "T-Statistic: -2.9623708453941098\n", + "P-Value: 0.05943073144700672\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1653\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1654\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6333333333333333\n", + "Average of Other Ratios: 0.398155737704918\n", + "T-Statistic: -6.811947916107325\n", + "P-Value: 0.006470670821303707\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.3332627118644068\n", + "T-Statistic: -2.9486299139697687\n", + "P-Value: 0.06009088857589412\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1654\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1655\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
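The verdict printed for each run above follows from the reported T-Statistic and P-Value: the other candidates' match ratios are tested against the highest match ratio, and the highest ratio is reported as "significantly different" when the two-sided p-value falls below 0.05. The notebook cell itself is not visible in this hunk, so the following is only a minimal sketch of that decision rule, assuming a one-sample t-test (`scipy.stats.ttest_1samp`) with the highest ratio as the reference mean; the helper name and the four example ratios are hypothetical (only their mean, 0.35, matches the log for document 1648, run 1).

```python
# Minimal sketch (assumed reconstruction, not the notebook's own cell) of the
# decision rule implied by the printed statistics: compare the other candidates'
# match ratios against the highest ratio with a one-sample, two-sided t-test
# and call the result significant at p < 0.05.
from scipy import stats

def is_significantly_highest(highest_ratio, other_ratios, alpha=0.05):
    """Return (t-statistic, p-value, verdict) in the form the log prints."""
    t_stat, p_value = stats.ttest_1samp(other_ratios, popmean=highest_ratio)
    return t_stat, p_value, p_value < alpha

# Hypothetical "other" ratios whose mean (0.35) matches the logged value.
t_stat, p_value, significant = is_significantly_highest(
    0.5573770491803278, [0.30, 0.33, 0.37, 0.40]
)
print(f"T-Statistic: {t_stat}")
print(f"P-Value: {p_value}")
print("The highest ratio is"
      + ("" if significant else " not")
      + " significantly different from the others.")
```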
Doc 1655, run 1: Highest Match Ratio 0.5573770491803278 | Average of Other Ratios 0.35416666666666663 | T-Statistic -3.722323789331516 | P-Value 0.03375297717263195 | significantly different
Doc 1655, run 2: Highest Match Ratio 0.3898305084745763 | Average of Other Ratios 0.32040960451977407 | T-Statistic -1.8464888902941 | P-Value 0.16198897032955312 | not significantly different
Doc 1656, run 1: Highest Match Ratio 0.6333333333333333 | Average of Other Ratios 0.40225409836065573 | T-Statistic -7.458643842395928 | P-Value 0.004989737155548977 | significantly different
Doc 1656, run 2: Highest Match Ratio 0.423728813559322 | Average of Other Ratios 0.341454802259887 | T-Statistic -2.2689902462968896 | P-Value 0.10802564058574593 | not significantly different
Doc 1657, run 1: Highest Match Ratio 0.5409836065573771 | Average of Other Ratios 0.39583333333333337 | T-Statistic -4.726024019252636 | P-Value 0.017950011968011444 | significantly different
Doc 1657, run 2: Highest Match Ratio 0.4406779661016949 | Average of Other Ratios 0.31242937853107344 | T-Statistic -5.102734357993427 | P-Value 0.01455637683815113 | significantly different
Doc 1658, run 1: Highest Match Ratio 0.5573770491803278 | Average of Other Ratios 0.2833333333333333 | T-Statistic -23.253380378036304 | P-Value 0.00017423225248505516 | significantly different
Doc 1658, run 2: Highest Match Ratio 0.35 | Average of Other Ratios 0.2754237288135593 | T-Statistic -2.66340767777208 | P-Value 0.07611699066362566 | not significantly different
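Every run in the raw output is preceded by the same stderr warning from `transformers` about unused `bert-base-uncased` weights (shown once at the top of this output). That warning is expected when the checkpoint is loaded into `BertForMaskedLM`, and it can be silenced once per process instead of being echoed before every document. A small sketch, assuming the notebook loads the model through the Hugging Face `transformers` API as the warning text indicates:

```python
# Load bert-base-uncased once and silence the repeated weight-initialization
# warning that transformers emits every time BertForMaskedLM is instantiated.
from transformers import BertForMaskedLM, BertTokenizer, logging

logging.set_verbosity_error()  # keep errors, drop the "Some weights ... were not used" notice

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertForMaskedLM.from_pretrained("bert-base-uncased")
model.eval()  # inference only; disables dropout
```

Because the warning reappears before every run, the checkpoint appears to be reloaded each time; keeping a single `model`/`tokenizer` pair alive for the whole loop avoids both the repeated load cost and the repeated warning.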
Doc 1659, run 1: Highest Match Ratio 0.6229508196721312 | Average of Other Ratios 0.4375 | T-Statistic -3.696204554145968 | P-Value 0.034368276506610014 | significantly different
Doc 1659, run 2: Highest Match Ratio 0.4406779661016949 | Average of Other Ratios 0.35035310734463276 | T-Statistic -4.624808617471442 | P-Value 0.019033890844563105 | significantly different
Doc 1660, run 1: Highest Match Ratio 0.48333333333333334 | Average of Other Ratios 0.41976320582877963 | T-Statistic -5.126120909780012 | P-Value 0.03601274479900024 | significantly different
Doc 1660, run 2: Highest Match Ratio 0.4406779661016949 | Average of Other Ratios 0.37549435028248584 | T-Statistic -2.153106812065672 | P-Value 0.12035479878967444 | not significantly different
Doc 1661, run 1: Highest Match Ratio 0.4918032786885246 | Average of Other Ratios 0.31666666666666665 | T-Statistic -3.5694569453443568 | P-Value 0.03756656146901733 | significantly different
Doc 1661, run 2: Highest Match Ratio 0.2711864406779661 | Average of Other Ratios 0.24708097928436912 | T-Statistic -2.461538461538462 | P-Value 0.13291549663455224 | not significantly different
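The `Doing N` / `Done N` banners and the two result blocks per document suggest a straightforward per-document driver loop. A hypothetical sketch of how such a loop could collect the printed values into one row per run; the `detect_watermark` stub and the placeholder `documents` list stand in for the BERT-based matching step, which is not shown in this hunk:

```python
# Hypothetical driver loop mirroring the structure of the log: two detection
# runs per document, one results row per run. detect_watermark is a stub.
from scipy import stats

def detect_watermark(document, run):
    # Stand-in for the real BERT-based matching step: returns the highest
    # candidate match ratio and the other candidates' ratios.
    return 0.55, [0.30, 0.33, 0.37, 0.40]

documents = ["first sample document ...", "second sample document ..."]  # placeholders
results = []

for doc_id, document in enumerate(documents, start=1648):
    print(f"Doing {doc_id}")
    for run in (1, 2):
        highest, others = detect_watermark(document, run)
        t_stat, p_value = stats.ttest_1samp(others, popmean=highest)
        results.append({
            "doc": doc_id,
            "run": run,
            "highest_ratio": highest,
            "average_others": sum(others) / len(others),
            "t_statistic": float(t_stat),
            "p_value": float(p_value),
            "significant": p_value < 0.05,
        })
    print(f"Done {doc_id}")
```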
Doc 1662, run 1: Highest Match Ratio 0.7049180327868853 | Average of Other Ratios 0.31666666666666665 | T-Statistic -19.02035477866064 | P-Value 0.000317329217166385 | significantly different
Doc 1662, run 2: Highest Match Ratio 0.3389830508474576 | Average of Other Ratios 0.30798022598870056 | T-Statistic -2.5144900114567874 | P-Value 0.08659496027337239 | not significantly different
Doc 1663, run 1: Highest Match Ratio 0.6833333333333333 | Average of Other Ratios 0.2987704918032787 | T-Statistic -9.287563234189735 | P-Value 0.002641987913158529 | significantly different
Doc 1663, run 2: Highest Match Ratio 0.3898305084745763 | Average of Other Ratios 0.26151129943502827 | T-Statistic -8.20761557188874 | P-Value 0.0037851627982459913 | significantly different
Doc 1664, run 1: Highest Match Ratio 0.5901639344262295 | Average of Other Ratios 0.35 | T-Statistic -20.378585595507392 | P-Value 0.00025834295408300217 | significantly different
Doc 1664, run 2: Highest Match Ratio 0.4 | Average of Other Ratios 0.3177966101694915 | T-Statistic -3.377106525540539 | P-Value 0.043183492567890926 | significantly different
Doc 1665, run 1: Highest Match Ratio 0.5666666666666667 | Average of Other Ratios 0.448292349726776 | T-Statistic -3.5599428053606994 | P-Value 0.03782169758880093 | significantly different
Doc 1665, run 2: Highest Match Ratio 0.5084745762711864 | Average of Other Ratios 0.39216101694915256 | T-Statistic -4.374860785929049 | P-Value 0.02209919137309572 | significantly different
Doc 1666, run 1: Highest Match Ratio 0.6166666666666667 | Average of Other Ratios 0.4021857923497268 | T-Statistic -8.675666340422445 | P-Value 0.0032223344164688237 | significantly different
Doc 1666, run 2: Highest Match Ratio 0.48333333333333334 | Average of Other Ratios 0.3940677966101695 | T-Statistic -4.0051372633147 | P-Value 0.027914510109473 | significantly different
Doc 1667, run 1: Highest Match Ratio 0.6885245901639344 | Average of Other Ratios 0.3958333333333333 | T-Statistic -12.228241897871557 | P-Value 0.0011776614876831963 | significantly different
Doc 1667, run 2: Highest Match Ratio 0.423728813559322 | Average of Other Ratios 0.30798022598870056 | T-Statistic -5.52252684001195 | P-Value 0.011695784203535333 | significantly different
Doc 1668, run 1: Highest Match Ratio 0.6333333333333333 | Average of Other Ratios 0.4060109289617486 | T-Statistic -4.451765940749828 | P-Value 0.0210919401610888 | significantly different
Doc 1668, run 2: Highest Match Ratio 0.4576271186440678 | Average of Other Ratios 0.41334745762711866 | T-Statistic -3.0332358222638836 | P-Value 0.05616694738868672 | not significantly different
Doing 1669
Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']
- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.3400956284153005\n", + "T-Statistic: -16.126211408017976\n", + "P-Value: 0.0005186735594686228\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.33326271186440676\n", + "T-Statistic: -3.2380690490156394\n", + "P-Value: 0.04792113259547419\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1669\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1670\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6721311475409836\n", + "Average of Other Ratios: 0.3583333333333334\n", + "T-Statistic: -21.740550300468346\n", + "P-Value: 0.00021299095231525587\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.3247175141242938\n", + "T-Statistic: -3.864669010850272\n", + "P-Value: 0.030636676156145785\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1670\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1671\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.4187158469945355\n", + "T-Statistic: -5.0760762819997485\n", + "P-Value: 0.014767517268100116\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.3754943502824859\n", + "T-Statistic: -18.459459459459406\n", + "P-Value: 0.0003469320912607935\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1671\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1672\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.4314890710382514\n", + "T-Statistic: -4.691101983797846\n", + "P-Value: 0.018314699259855377\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.35444915254237286\n", + "T-Statistic: -7.394856051762754\n", + "P-Value: 0.005114520056169129\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1672\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1673\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.4105191256830601\n", + "T-Statistic: -6.871714846368723\n", + "P-Value: 0.006311291308420261\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.38389830508474576\n", + "T-Statistic: -2.591193878173862\n", + "P-Value: 0.08099031623314684\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1673\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1674\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5666666666666667\n", + "Average of Other Ratios: 0.4023907103825136\n", + "T-Statistic: -3.1785087830863152\n", + "P-Value: 0.050151443196549406\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.3372175141242938\n", + "T-Statistic: -2.2461944693225684\n", + "P-Value: 0.11032623843400814\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1674\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1675\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5666666666666667\n", + "Average of Other Ratios: 0.38989071038251366\n", + "T-Statistic: -8.909952627909695\n", + "P-Value: 0.002981899212734121\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.35444915254237286\n", + "T-Statistic: -69.57142857142838\n", + "P-Value: 6.544177344803568e-06\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1675\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1676\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6666666666666666\n", + "Average of Other Ratios: 0.3488387978142076\n", + "T-Statistic: -10.158428205179094\n", + "P-Value: 0.0020325642199010164\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4\n", + "Average of Other Ratios: 0.3347457627118644\n", + "T-Statistic: -2.262423733759691\n", + "P-Value: 0.10868236109185891\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1676\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1677\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.39016393442622954\n", + "T-Statistic: -4.155570602285484\n", + "P-Value: 0.025334728700013992\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.43333333333333335\n", + "Average of Other Ratios: 0.3305084745762712\n", + "T-Statistic: -2.886707772014044\n", + "P-Value: 0.06318231943174393\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1677\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1678\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.426844262295082\n", + "T-Statistic: -3.4665463985771736\n", + "P-Value: 0.04044752053565438\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.39661016949152544\n", + "T-Statistic: -5.396227020339848\n", + "P-Value: 0.012472526064506972\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1678\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1679\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.65\n", + "Average of Other Ratios: 0.42295081967213116\n", + "T-Statistic: -5.864413389759132\n", + "P-Value: 0.009887952398219825\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.3483050847457627\n", + "T-Statistic: -11.775818566563489\n", + "P-Value: 0.00713429023847155\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1679\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1680\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.4019808743169399\n", + "T-Statistic: -3.3292886989365154\n", + "P-Value: 0.044742920567822704\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3541666666666667\n", + "T-Statistic: -3.8771841553769035\n", + "P-Value: 0.030380667708462643\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1680\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1681\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.639344262295082\n", + "Average of Other Ratios: 0.39999999999999997\n", + "T-Statistic: -9.082476164942625\n", + "P-Value: 0.0028198267013386054\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.3629237288135593\n", + "T-Statistic: -4.044393553582212\n", + "P-Value: 0.02720985020655579\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1681\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1682\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + },
[ Cell output for items 1682–1702, condensed. For each item the notebook prints the same bert-base-uncased loading warning on stderr twice, "Doing N" / "Done N" banner lines, and two stdout blocks reporting Highest Match Ratio, Average of Other Ratios, T-Statistic, P-Value, and a significance verdict. The logged values (ratios and p-values rounded to four decimals, t-statistics to three):

  item  run  highest  avg_others  t_stat    p_value  verdict
  1682   1   0.5082   0.3542      -5.143    0.0142   significant
  1682   2   0.3729   0.3080      -2.584    0.0815   not significant
  1683   1   0.7705   0.3792     -10.108    0.0021   significant
  1683   2   0.4746   0.3711      -5.036    0.0151   significant
  1684   1   0.7705   0.4167     -15.679    0.0006   significant
  1684   2   0.4746   0.3881      -3.061    0.0550   not significant
  1685   1   0.6721   0.4167     -12.515    0.0011   significant
  1685   2   0.5085   0.3965      -4.333    0.0227   significant
  1686   1   0.5333   0.4605      -4.084    0.0265   significant
  1686   2   0.4407   0.3933      -6.777    0.0211   significant
  1687   1   0.5500   0.4397      -2.805    0.0675   not significant
  1687   2   0.4915   0.4090      -2.725    0.0722   not significant
  1688   1   0.6721   0.3417     -10.239    0.0020   significant
  1688   2   0.4576   0.3585      -7.385    0.0051   significant
  1689   1   0.5902   0.4208      -5.383    0.0126   significant
  1689   2   0.4407   0.3755      -5.314    0.0130   significant
  1690   1   0.5000   0.4271      -1.494    0.2319   not significant
  1690   2   0.4068   0.3288      -3.154    0.0511   not significant
  1691   1   0.5500   0.4023      -6.785    0.0065   significant
  1691   2   0.4407   0.3629      -5.382    0.0126   significant
  1692   1   0.5333   0.3819      -3.156    0.0510   not significant
  1692   2   0.4333   0.3475      -7.849    0.0043   significant
  1693   1   0.5738   0.3750      -3.141    0.0516   not significant
  1693   2   0.4068   0.3164      -2.724    0.0723   not significant
  1694   1   0.5333   0.3371     -12.956    0.0059   significant
  1694   2   0.3898   0.3038      -4.136    0.0257   significant
  1695   1   0.7705   0.3292      -8.418    0.0035   significant
  1695   2   0.3559   0.2786      -6.564    0.0072   significant
  1696   1   0.5410   0.4542      -3.061    0.0549   not significant
  1696   2   0.4500   0.3686      -3.342    0.0443   significant
  1697   1   0.5500   0.3982      -3.117    0.0526   not significant
  1697   2   0.5000   0.3644      -4.131    0.0257   significant
  1698   1   0.6230   0.4250      -4.094    0.0264   significant
  1698   2   0.5000   0.3814      -2.919    0.0615   not significant
  1699   1   0.6000   0.4107     -16.306    0.0005   significant
  1699   2   0.5085   0.3544      -4.448    0.0211   significant
  1700   1   0.6230   0.4625      -3.086    0.0539   not significant
  1700   2   0.5593   0.3461      -7.952    0.0041   significant
  1701   1   0.5333   0.4316      -7.416    0.0051   significant
  1701   2   0.4237   0.3546      -4.196    0.0247   significant
  1702   1   0.6393   0.4292      -3.791    0.0322   significant
  1702   2   0.5085   0.4514      -2.768    0.0697   not significant ]
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.639344262295082\n", + "Average of Other Ratios: 0.42916666666666664\n", + "T-Statistic: -3.7914997183027954\n", + "P-Value: 0.03219017406449965\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.4514124293785311\n", + "T-Statistic: -2.7684223456685544\n", + "P-Value: 0.06966019639221771\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1702\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1703\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.4023224043715847\n", + "T-Statistic: -5.3872290083001095\n", + "P-Value: 0.012530405150481858\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.38983050847457623\n", + "T-Statistic: -3.3480885546378523\n", + "P-Value: 0.04412143039369844\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1703\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1704\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.3773224043715847\n", + "T-Statistic: -4.640651750707308\n", + "P-Value: 0.018858726540020358\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.3332627118644068\n", + "T-Statistic: -3.6621184863223024\n", + "P-Value: 0.03519301414612658\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1704\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1705\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.48333333333333334\n", + "Average of Other Ratios: 0.3860655737704918\n", + "T-Statistic: -3.4475208334165512\n", + "P-Value: 0.04101062763956584\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.33742937853107347\n", + "T-Statistic: -4.962234528821576\n", + "P-Value: 0.015715475861098616\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1705\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1706\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6229508196721312\n", + "Average of Other Ratios: 0.4333333333333334\n", + "T-Statistic: -5.465357642986741\n", + "P-Value: 0.01203929791125337\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.40098870056497177\n", + "T-Statistic: -3.2752006464574146\n", + "P-Value: 0.04659397436436988\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1706\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1707\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5081967213114754\n", + "Average of Other Ratios: 0.3458333333333333\n", + "T-Statistic: -3.3291936924471166\n", + "P-Value: 0.04474608942509305\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.35\n", + "Average of Other Ratios: 0.2923728813559322\n", + "T-Statistic: -3.2984845004941277\n", + "P-Value: 0.04578545695456099\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1707\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1708\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.48333333333333334\n", + "Average of Other Ratios: 0.37315573770491806\n", + "T-Statistic: -2.5737901679797495\n", + "P-Value: 0.0822218718502907\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3333333333333333\n", + "Average of Other Ratios: 0.2754237288135593\n", + "T-Statistic: -3.609848715935058\n", + "P-Value: 0.03650753702324681\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1708\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1709\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5573770491803278\n", + "Average of Other Ratios: 0.41666666666666663\n", + "T-Statistic: -9.248430069349528\n", + "P-Value: 0.0026747507004180183\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.3813559322033898\n", + "T-Statistic: -10.066666666666663\n", + "P-Value: 0.0020873570938939115\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1709\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1710\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6333333333333333\n", + "Average of Other Ratios: 0.3398224043715847\n", + "T-Statistic: -7.915878353618565\n", + "P-Value: 0.004203089684479934\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4\n", + "Average of Other Ratios: 0.3220338983050848\n", + "T-Statistic: -5.039047529047529\n", + "P-Value: 0.01506747099332577\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1710\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1711\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6333333333333333\n", + "Average of Other Ratios: 0.419603825136612\n", + "T-Statistic: -4.140759720616552\n", + "P-Value: 0.025574700755914715\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4\n", + "Average of Other Ratios: 0.3432203389830508\n", + "T-Statistic: -3.9231182932531077\n", + "P-Value: 0.029464267753910946\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1711\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1712\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6666666666666666\n", + "Average of Other Ratios: 0.4316939890710383\n", + "T-Statistic: -11.006210975743855\n", + "P-Value: 0.0016061976208647793\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.43644067796610164\n", + "T-Statistic: -2.3596084289526735\n", + "P-Value: 0.09943521206177618\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1712\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1713\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.38627049180327866\n", + "T-Statistic: -2.911570443803633\n", + "P-Value: 0.06191769354930334\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3559322033898305\n", + "Average of Other Ratios: 0.3360169491525423\n", + "T-Statistic: -1.4242424242424274\n", + "P-Value: 0.38970838325942464\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1713\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1714\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.46427595628415297\n", + "T-Statistic: -1.8536473961974427\n", + "P-Value: 0.16084088720625275\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5423728813559322\n", + "Average of Other Ratios: 0.4132768361581921\n", + "T-Statistic: -5.184819470451837\n", + "P-Value: 0.013930470217760019\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1714\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1715\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5409836065573771\n", + "Average of Other Ratios: 0.4125\n", + "T-Statistic: -2.7434685305994506\n", + "P-Value: 0.07113088858607176\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.4091101694915254\n", + "T-Statistic: -3.301274514960128\n", + "P-Value: 0.04568977195819744\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1715\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1716\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
+ "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
+ ]
+ },

[Collapsed repeated cell output: for each of documents 1716 through 1736 the run prints two stdout blocks of the form "Highest Match Ratio: … / Average of Other Ratios: … / T-Statistic: … / P-Value: …", each followed by a verdict on whether the highest ratio is significantly different from the others, framed by "Done <n>" / "Doing <n+1>" banner lines. Every pass also emits the same Hugging Face stderr warning quoted above ("Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight'] …"), which the warning itself notes is expected when a pre-training checkpoint is loaded for masked-LM use. The log then opens document 1737.]
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6833333333333333\n", + "Average of Other Ratios: 0.31509562841530053\n", + "T-Statistic: -12.567253014018696\n", + "P-Value: 0.0010862702962580325\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.35\n", + "Average of Other Ratios: 0.3093220338983051\n", + "T-Statistic: -3.1999999999999997\n", + "P-Value: 0.04933184296269623\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1737\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1738\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5409836065573771\n", + "Average of Other Ratios: 0.48333333333333334\n", + "T-Statistic: -8.472825175856576\n", + "P-Value: 0.003451637475653648\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.38201506591337103\n", + "T-Statistic: -6.299599690538863\n", + "P-Value: 0.024284334261122562\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1738\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1739\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7704918032786885\n", + "Average of Other Ratios: 0.3583333333333333\n", + "T-Statistic: -12.770263120773526\n", + "P-Value: 0.001036015557970406\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.35021186440677965\n", + "T-Statistic: -13.369445204883407\n", + "P-Value: 0.0009045905316411595\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1739\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1740\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
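The warning repeated before every run is informational: bert-base-uncased ships pre-training heads (the pooler and next-sentence-prediction weights) that a BertForMaskedLM head does not use, so Transformers reports them as unused. Below is a minimal sketch of the kind of loading step that triggers it, together with one way to silence it during long batch runs; this is an illustration, not the notebook's exact cell.

```python
# Sketch only: load bert-base-uncased with a masked-LM head (this is what
# produces the "Some weights ... were not used" message) and quiet the logger.
from transformers import BertForMaskedLM, BertTokenizer, logging

logging.set_verbosity_error()  # hide informational warnings during batch runs

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertForMaskedLM.from_pretrained("bert-base-uncased")
model.eval()  # inference only; no fine-tuning is involved here
```

Silencing the logger only hides the message; the loaded masked-LM weights are unaffected.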
+   Run 1740: 0.4918 vs. 0.3708 (t = -2.389, p = 0.09681, not significant); 0.4000 vs. 0.2924 (t = -5.375, p = 0.01261, significant)
+   Run 1741: 0.7000 vs. 0.4484 (t = -12.885, p = 0.00101, significant); 0.4915 vs. 0.4220 (t = -39.400, p = 3.6e-05, significant)
+   Run 1742: 0.6885 vs. 0.4042 (t = -5.831, p = 0.01005, significant); 0.5593 vs. 0.3708 (t = -4.916, p = 0.01612, significant)
+   Run 1743: 0.5738 vs. 0.3708 (t = -10.983, p = 0.00162, significant); 0.4500 vs. 0.3008 (t = -13.987, p = 0.00079, significant)
+   Run 1744: 0.5667 vs. 0.3895 (t = -4.076, p = 0.02666, significant); 0.4333 vs. 0.3178 (t = -4.258, p = 0.02375, significant)
+   Run 1745: 0.5167 vs. 0.3447 (t = -3.192, p = 0.04962, significant); 0.3390 vs. 0.2572 (t = -4.034, p = 0.02739, significant)
+   Run 1746: 0.5000 vs. 0.4439 (t = -1.784, p = 0.17238, not significant); 0.4746 vs. 0.3540 (t = -3.368, p = 0.07797, not significant)
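The printed statistics are consistent with a two-sided one-sample t-test of the non-candidate match ratios against the highest ratio, with "significantly different" declared when p < 0.05 (for example, run 1737's t = -3.200 with what appears to be 3 degrees of freedom gives p ≈ 0.0493). A hedged sketch of that check follows, assuming SciPy and four "other" ratios per text; the function name and example values are illustrative, not taken from the notebook.

```python
# Sketch of the significance check implied by the printed statistics
# (assumed, not copied from the notebook): test whether the candidate key's
# match ratio stands apart from the ratios obtained with the other keys.
from scipy import stats

def is_watermark_significant(highest_ratio, other_ratios, alpha=0.05):
    """One-sample t-test of the other ratios against the highest ratio."""
    t_stat, p_value = stats.ttest_1samp(other_ratios, popmean=highest_ratio)
    print(f"Highest Match Ratio: {highest_ratio}")
    print(f"Average of Other Ratios: {sum(other_ratios) / len(other_ratios)}")
    print(f"T-Statistic: {t_stat}")
    print(f"P-Value: {p_value}")
    return p_value < alpha

# Example with made-up ratios (four non-candidate keys -> 3 degrees of freedom):
is_watermark_significant(0.77, [0.45, 0.43, 0.47, 0.45])
```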
+   Run 1747: 0.5000 vs. 0.4144 (t = -8.658, p = 0.01308, significant); 0.4746 vs. 0.3881 (t = -4.245, p = 0.02394, significant)
+   Run 1748: 0.6066 vs. 0.3875 (t = -5.491, p = 0.01188, significant); 0.4068 vs. 0.3205 (t = -5.113, p = 0.01448, significant)
+   Run 1749: 0.6333 vs. 0.4689 (t = -3.682, p = 0.03471, significant); 0.5333 vs. 0.4195 (t = -6.516, p = 0.00734, significant)
+   Run 1750: 0.6230 vs. 0.4750 (t = -3.504, p = 0.03936, significant); 0.4576 vs. 0.4093 (t = -4.385, p = 0.02197, significant)
+   Run 1751: 0.7705 vs. 0.3458 (t = -15.423, p = 0.00059, significant); 0.4576 vs. 0.3290 (t = -4.154, p = 0.02537, significant)
+   Run 1752: 0.7500 vs. 0.4275 (t = -24.494, p = 0.00015, significant); 0.4407 vs. 0.3671 (t = -3.028, p = 0.05639, not significant)
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7540983606557377\n", + "Average of Other Ratios: 0.4041666666666667\n", + "T-Statistic: -11.683939490062333\n", + "P-Value: 0.0013469985474943292\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.3800847457627119\n", + "T-Statistic: -2.6499484677906273\n", + "P-Value: 0.07699722400216412\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1753\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1754\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5737704918032787\n", + "Average of Other Ratios: 0.4125\n", + "T-Statistic: -3.7772132331663713\n", + "P-Value: 0.03250520782748611\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.3713983050847458\n", + "T-Statistic: -3.082793063741217\n", + "P-Value: 0.054017563157762766\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1754\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1755\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6885245901639344\n", + "Average of Other Ratios: 0.3\n", + "T-Statistic: -10.255681847231036\n", + "P-Value: 0.0019765537724212886\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3389830508474576\n", + "Average of Other Ratios: 0.2866290018832392\n", + "T-Statistic: -4.709835325623289\n", + "P-Value: 0.04224443000728903\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1755\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1756\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.48333333333333334\n", + "Average of Other Ratios: 0.3360655737704918\n", + "T-Statistic: -10.610181401233849\n", + "P-Value: 0.00178889887952055\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3559322033898305\n", + "Average of Other Ratios: 0.2532015065913371\n", + "T-Statistic: -2.2513229059561555\n", + "P-Value: 0.15321033928811528\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1756\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1757\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.8360655737704918\n", + "Average of Other Ratios: 0.3875\n", + "T-Statistic: -18.026271996123526\n", + "P-Value: 0.0003723594365652728\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3763653483992467\n", + "T-Statistic: -3.2352232779643906\n", + "P-Value: 0.0837182578425499\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1757\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1758\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6833333333333333\n", + "Average of Other Ratios: 0.3524590163934427\n", + "T-Statistic: -9.375725828304732\n", + "P-Value: 0.002570100386249655\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3728813559322034\n", + "Average of Other Ratios: 0.33742937853107347\n", + "T-Statistic: -2.4658853737705697\n", + "P-Value: 0.09039381717094448\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1758\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1759\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6885245901639344\n", + "Average of Other Ratios: 0.45833333333333337\n", + "T-Statistic: -7.472032792203066\n", + "P-Value: 0.004964056821106828\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.35847457627118645\n", + "T-Statistic: -5.143398239932954\n", + "P-Value: 0.014241825510816557\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1759\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1760\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.4308743169398907\n", + "T-Statistic: -4.435866726830125\n", + "P-Value: 0.04724822608025257\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.3813559322033898\n", + "T-Statistic: -4.676537180435968\n", + "P-Value: 0.018469641646674063\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1760\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1761\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6557377049180327\n", + "Average of Other Ratios: 0.4208333333333334\n", + "T-Statistic: -5.724222068679529\n", + "P-Value: 0.010581602969978048\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.363135593220339\n", + "T-Statistic: -2.293068697718329\n", + "P-Value: 0.10565819843234399\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1761\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1762\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.4349726775956284\n", + "T-Statistic: -3.1471966691041255\n", + "P-Value: 0.05137660775019963\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.38375706214689265\n", + "T-Statistic: -2.3710395141336553\n", + "P-Value: 0.09841141140216388\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1762\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1763\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.4689207650273224\n", + "T-Statistic: -2.927463218319362\n", + "P-Value: 0.0611258808814014\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.42664783427495295\n", + "T-Statistic: -3.434253416143983\n", + "P-Value: 0.07533225481738264\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1763\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1764\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4\n", + "Average of Other Ratios: 0.2701502732240437\n", + "T-Statistic: -3.40514995545028\n", + "P-Value: 0.042300988302641715\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.2542372881355932\n", + "Average of Other Ratios: 0.2189265536723164\n", + "T-Statistic: -2.272727272727272\n", + "P-Value: 0.15095548648584448\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1764\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1765\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.4566256830601093\n", + "T-Statistic: -3.4235368770297985\n", + "P-Value: 0.04173478390661575\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.43333333333333335\n", + "Average of Other Ratios: 0.3940677966101695\n", + "T-Statistic: -1.6131196118561335\n", + "P-Value: 0.20511808054337077\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1765\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1766\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.4398224043715847\n", + "T-Statistic: -10.558130916542433\n", + "P-Value: 0.0018149271537199203\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.4224576271186441\n", + "T-Statistic: -2.122911802712327\n", + "P-Value: 0.1238400486795913\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1766\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1767\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + },
+ Per-document detection output for documents 1767-1787, two checks per document. Each check loads bert-base-uncased into BertForMaskedLM, which prints the expected notice that the pooler and next-sentence-prediction weights are unused. The per-check statistics are:
+ Doc   Check  Highest ratio  Avg other ratios  T-statistic  P-value    Significant (p < 0.05)
+ 1767  1      0.6557         0.4542            -5.768       0.0104     yes
+ 1767  2      0.4833         0.3983            -3.476       0.0402     yes
+ 1768  1      0.5167         0.4145            -2.372       0.0984     no
+ 1768  2      0.4833         0.4068            -3.499       0.0395     yes
+ 1769  1      0.5500         0.3109            -6.228       0.0083     yes
+ 1769  2      0.3559         0.2363            -3.432       0.0754     no
+ 1770  1      0.5574         0.3917            -5.996       0.0093     yes
+ 1770  2      0.4576         0.3125            -3.629       0.0360     yes
+ 1771  1      0.6667         0.4275            -26.611      0.00012    yes
+ 1771  2      0.4407         0.3876            -6.714       0.0215     yes
+ 1772  1      0.5082         0.3500            -3.224       0.0484     yes
+ 1772  2      0.4167         0.3051            -5.099       0.0146     yes
+ 1773  1      0.7213         0.3917            -8.772       0.0031     yes
+ 1773  2      0.3833         0.3432            -4.944       0.0159     yes
+ 1774  1      0.5500         0.3313            -3.683       0.0347     yes
+ 1774  2      0.4237         0.2826            -7.882       0.0043     yes
+ 1775  1      0.5902         0.4125            -4.161       0.0252     yes
+ 1775  2      0.4915         0.4049            -5.024       0.0152     yes
+ 1776  1      0.6066         0.4958            -2.860       0.0646     no
+ 1776  2      0.5593         0.4595            -2.393       0.0965     no
+ 1777  1      0.5667         0.4357            -3.183       0.04997    yes
+ 1777  2      0.4746         0.3546            -5.889       0.0098     yes
+ 1778  1      0.5902         0.4167            -8.500       0.0034     yes
+ 1778  2      0.4407         0.3248            -12.454      0.0011     yes
+ 1779  1      0.5667         0.4604            -2.387       0.0970     no
+ 1779  2      0.5085         0.3756            -3.368       0.0435     yes
+ 1780  1      0.4667         0.3898            -1.856       0.1605     no
+ 1780  2      0.4237         0.3672            -4.082       0.0265     yes
+ 1781  1      0.5246         0.3833            -3.203       0.0492     yes
+ 1781  2      0.4576         0.3540            -2.554       0.0837     no
+ 1782  1      0.6557         0.4000            -37.586      4.14e-05   yes
+ 1782  2      0.4746         0.4175            -1.947       0.1467     no
+ 1783  1      0.6557         0.3625            -9.791       0.0023     yes
+ 1783  2      0.4576         0.3797            -3.994       0.0281     yes
+ 1784  1      0.5667         0.3984            -5.666       0.0109     yes
+ 1784  2      0.4576         0.3546            -2.831       0.0661     no
+ 1785  1      0.5667         0.3770            -3.067       0.0547     no
+ 1785  2      0.3898         0.3209            -3.178       0.0502     no
+ 1786  1      0.5738         0.4292            -3.794       0.0321     yes
+ 1786  2      0.5085         0.4304            -6.772       0.0066     yes
+ 1787  1      0.5902         0.4375            -4.006       0.0279     yes
+ 1787  2      0.4068         0.3840            -5.702       0.0107     yes
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5901639344262295\n", + "Average of Other Ratios: 0.4375\n", + "T-Statistic: -4.005635792635015\n", + "P-Value: 0.027905415031102557\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.3839689265536724\n", + "T-Statistic: -5.701874626813004\n", + "P-Value: 0.010698008944756929\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1787\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1788\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.4398907103825137\n", + "T-Statistic: -15.578682000811366\n", + "P-Value: 0.0005747431214231546\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.3542372881355932\n", + "T-Statistic: -5.854047900359961\n", + "P-Value: 0.009937162378535997\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1788\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1789\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.4480191256830601\n", + "T-Statistic: -6.166892456957159\n", + "P-Value: 0.008582542762464446\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.3412429378531074\n", + "T-Statistic: -3.8779880896871726\n", + "P-Value: 0.030364316576825364\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1789\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1790\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.44760928961748636\n", + "T-Statistic: -3.4952014484831047\n", + "P-Value: 0.039617848196352085\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.42196327683615825\n", + "T-Statistic: -3.458948005905017\n", + "P-Value: 0.04067122554425211\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1790\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1791\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5737704918032787\n", + "Average of Other Ratios: 0.3541666666666667\n", + "T-Statistic: -3.9615442295558925\n", + "P-Value: 0.028724785655088337\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4166666666666667\n", + "Average of Other Ratios: 0.3135593220338983\n", + "T-Statistic: -1.8272855362744203\n", + "P-Value: 0.16511600726806364\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1791\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1792\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.8032786885245902\n", + "Average of Other Ratios: 0.29583333333333334\n", + "T-Statistic: -12.539145243620842\n", + "P-Value: 0.0010934817370295948\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3377118644067797\n", + "T-Statistic: -6.269358747755155\n", + "P-Value: 0.00819196116993917\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1792\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1793\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6557377049180327\n", + "Average of Other Ratios: 0.45416666666666666\n", + "T-Statistic: -7.1071028723917875\n", + "P-Value: 0.0057315850409424066\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5932203389830508\n", + "Average of Other Ratios: 0.40939265536723163\n", + "T-Statistic: -13.436639703627074\n", + "P-Value: 0.0008912634414880094\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1793\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1794\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.47540983606557374\n", + "Average of Other Ratios: 0.42083333333333334\n", + "T-Statistic: -6.840395759439469\n", + "P-Value: 0.006394156517694729\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.2910310734463277\n", + "T-Statistic: -4.146372958429369\n", + "P-Value: 0.025483409105270218\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1794\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1795\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6557377049180327\n", + "Average of Other Ratios: 0.4083333333333334\n", + "T-Statistic: -4.492763882679988\n", + "P-Value: 0.020579223849711902\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3728813559322034\n", + "Average of Other Ratios: 0.3313559322033898\n", + "T-Statistic: -1.9155635193164227\n", + "P-Value: 0.19549520636629938\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1795\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1796\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.41905737704918034\n", + "T-Statistic: -6.292690992312831\n", + "P-Value: 0.008106302500641403\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.38728813559322034\n", + "T-Statistic: -2.7837837837837838\n", + "P-Value: 0.10845063612547547\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1796\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1797\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7\n", + "Average of Other Ratios: 0.4064890710382514\n", + "T-Statistic: -7.456642804764441\n", + "P-Value: 0.004993590238372753\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.321045197740113\n", + "T-Statistic: -5.483588555093928\n", + "P-Value: 0.011928337117442485\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1797\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1798\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5573770491803278\n", + "Average of Other Ratios: 0.4083333333333334\n", + "T-Statistic: -2.8639314065795065\n", + "P-Value: 0.06436927783811348\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3612994350282486\n", + "T-Statistic: -14.789473684210558\n", + "P-Value: 0.042980044870241256\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1798\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1799\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6229508196721312\n", + "Average of Other Ratios: 0.4416666666666667\n", + "T-Statistic: -4.673530022214723\n", + "P-Value: 0.018501844031635108\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.3962570621468926\n", + "T-Statistic: -3.830072253349244\n", + "P-Value: 0.0313589410287926\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1799\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1800\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.3916666666666667\n", + "T-Statistic: -4.31785074614014\n", + "P-Value: 0.02288635431540596\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3246468926553672\n", + "T-Statistic: -3.659896369736786\n", + "P-Value: 0.035247654519723955\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1800\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1801\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + },
[Condensed streamed output for samples 1801-1820. Every model load prints the same Hugging Face warning, "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']", followed by the "This IS expected ..." note; this is expected because the pooler and next-sentence-prediction weights are not part of the masked-LM head, and the repeated copies are omitted here. For each sample the notebook logs two tests, each reporting the Highest Match Ratio, the Average of Other Ratios, and the T-Statistic and P-Value of comparing the highest ratio against the others, with a verdict at the p < 0.05 level. Values are rounded to four decimals:]

Sample  Test  Highest Match Ratio  Avg. of Other Ratios  T-Statistic  P-Value  Significant (p < 0.05)
1801    1     0.7705               0.3125                -21.9836     0.0002   yes
1801    2     0.3390               0.2920                 -3.5408     0.0713   no
1802    1     0.6066               0.3833                 -4.7854     0.0174   yes
1802    2     0.4237               0.3584                 -3.1685     0.0505   no
1803    1     0.4590               0.3500                 -3.0834     0.0540   no
1803    2     0.4068               0.3208                 -6.8084     0.0065   yes
1804    1     0.5574               0.3917                 -3.7805     0.0324   yes
1804    2     0.3898               0.3463                 -2.0408     0.1339   no
1805    1     0.6066               0.3917                 -5.5399     0.0116   yes
1805    2     0.4667               0.3517                 -5.1585     0.0141   yes
1806    1     0.5833               0.4107                -11.4481     0.0014   yes
1806    2     0.4746               0.3715                 -5.3671     0.0127   yes
1807    1     0.6066               0.4292                 -3.0645     0.0548   no
1807    2     0.4915               0.3968                 -2.4965     0.0880   no
1808    1     0.5667               0.4691                 -4.8408     0.0168   yes
1808    2     0.5333               0.4746                 -2.3551     0.0998   no
1809    1     0.5667               0.4567                 -4.0489     0.0271   yes
1809    2     0.4746               0.4009                 -8.1225     0.0039   yes
1810    1     0.5574               0.3958                 -2.7717     0.0695   no
1810    2     0.4746               0.3457                 -4.0949     0.0263   yes
1811    1     0.5167               0.4484                 -3.1374     0.0518   no
1811    2     0.4746               0.3463                 -3.5852     0.0372   yes
1812    1     0.5410               0.3583                 -9.2074     0.0027   yes
1812    2     0.3898               0.3125                 -2.8480     0.0652   no
1813    1     0.6066               0.3792                 -6.7690     0.0066   yes
1813    2     0.4068               0.2956                 -5.3146     0.0130   yes
1814    1     0.5833               0.4400                 -6.9555     0.0061   yes
1814    2     0.4915               0.3712                 -6.0512     0.0091   yes
1815    1     0.5574               0.3792                 -4.1740     0.0250   yes
1815    2     0.3559               0.2994                 -2.6261     0.0786   no
1816    1     0.7213               0.3875                -13.4147     0.0009   yes
1816    2     0.4167               0.3559                 -3.9253     0.0294   yes
1817    1     0.6167               0.5266                 -2.0101     0.1380   no
1817    2     0.5254               0.4348                 -4.4956     0.0205   yes
1818    1     0.6167               0.3727                 -3.9550     0.0288   yes
1818    2     0.3898               0.3417                 -5.2117     0.0137   yes
1819    1     0.7049               0.4458                 -6.3135     0.0080   yes
1819    2     0.5254               0.4388                 -3.0713     0.0545   no
1820    1     0.6167               0.4482                 -4.0675     0.0268   yes
1820    2     0.4915               0.3923                 -2.7241     0.0723   no
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5737704918032787\n", + "Average of Other Ratios: 0.475\n", + "T-Statistic: -3.0602917588065157\n", + "P-Value: 0.054980421525545886\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.576271186440678\n", + "Average of Other Ratios: 0.40494350282485875\n", + "T-Statistic: -6.15102387874345\n", + "P-Value: 0.008645202687168257\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1821\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1822\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.3775273224043716\n", + "T-Statistic: -9.402775448335925\n", + "P-Value: 0.0025485643327779633\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.3374293785310734\n", + "T-Statistic: -2.6487965059751333\n", + "P-Value: 0.07707314575519472\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1822\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1823\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.36489071038251364\n", + "T-Statistic: -8.409157084603672\n", + "P-Value: 0.003528044949249387\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.2919962335216572\n", + "T-Statistic: -9.926980271680543\n", + "P-Value: 0.009995759216947474\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1823\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1824\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.42725409836065575\n", + "T-Statistic: -3.221457895636802\n", + "P-Value: 0.04853032779203252\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.3204802259887006\n", + "T-Statistic: -6.442102133047244\n", + "P-Value: 0.007584755136376526\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1824\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1825\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6229508196721312\n", + "Average of Other Ratios: 0.4625\n", + "T-Statistic: -3.0100455180506156\n", + "P-Value: 0.05720966272875764\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.4279661016949153\n", + "T-Statistic: -4.9770903720375195\n", + "P-Value: 0.015587358121496146\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1825\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1826\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.385792349726776\n", + "T-Statistic: -4.6686976061393075\n", + "P-Value: 0.018553745232426106\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.3432203389830508\n", + "T-Statistic: -10.881765043296808\n", + "P-Value: 0.0016608270081661155\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1826\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1827\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.37732240437158465\n", + "T-Statistic: -5.919359446594679\n", + "P-Value: 0.009632363464382891\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.38799435028248586\n", + "T-Statistic: -1.7579248960751477\n", + "P-Value: 0.17700539897601802\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1827\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1828\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5573770491803278\n", + "Average of Other Ratios: 0.45\n", + "T-Statistic: -2.3790961945021833\n", + "P-Value: 0.0976975284567096\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.36271186440677966\n", + "T-Statistic: -3.4834542887072035\n", + "P-Value: 0.03995532706040686\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1828\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1829\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6666666666666666\n", + "Average of Other Ratios: 0.47274590163934427\n", + "T-Statistic: -6.685134180648596\n", + "P-Value: 0.006826809830237584\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.40932203389830507\n", + "T-Statistic: -12.89316271749542\n", + "P-Value: 0.0010070852904372337\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1829\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1830\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5409836065573771\n", + "Average of Other Ratios: 0.42500000000000004\n", + "T-Statistic: -1.7723627686294894\n", + "P-Value: 0.17445150254614536\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.3586158192090395\n", + "T-Statistic: -5.301653087443131\n", + "P-Value: 0.013098812973106142\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1830\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1831\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.4687158469945355\n", + "T-Statistic: -2.42952422971014\n", + "P-Value: 0.09336973169699041\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.41101694915254233\n", + "T-Statistic: -2.6934842013379563\n", + "P-Value: 0.07419455966723891\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1831\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1832\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5666666666666667\n", + "Average of Other Ratios: 0.44781420765027324\n", + "T-Statistic: -2.175312723682659\n", + "P-Value: 0.1178663975785963\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.36694915254237287\n", + "T-Statistic: -2.416081794014973\n", + "P-Value: 0.0945001250957143\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1832\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1833\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5666666666666667\n", + "Average of Other Ratios: 0.42698087431693993\n", + "T-Statistic: -2.926838266377036\n", + "P-Value: 0.06115677716147604\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.35028248587570626\n", + "T-Statistic: -4.358898943540671\n", + "P-Value: 0.02231600572520053\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1833\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1834\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.4226775956284153\n", + "T-Statistic: -3.0320096719785434\n", + "P-Value: 0.05622147844106881\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.35882768361581924\n", + "T-Statistic: -2.596202410283425\n", + "P-Value: 0.08064011223189176\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1834\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1835\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.40259562841530055\n", + "T-Statistic: -3.6156991795445577\n", + "P-Value: 0.0363573206604073\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.43333333333333335\n", + "Average of Other Ratios: 0.3347457627118644\n", + "T-Statistic: -5.6429955425120175\n", + "P-Value: 0.011012841275964533\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1856\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1857\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6557377049180327\n", + "Average of Other Ratios: 0.4\n", + "T-Statistic: -10.42437348156729\n", + "P-Value: 0.0018841578114493912\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.3794491525423729\n", + "T-Statistic: -3.26894241254243\n", + "P-Value: 0.04681436566609524\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1857\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1858\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.4603142076502732\n", + "T-Statistic: -2.0946414252764485\n", + "P-Value: 0.1272128293571138\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5423728813559322\n", + "Average of Other Ratios: 0.40084745762711865\n", + "T-Statistic: -3.985615868631576\n", + "P-Value: 0.028273674351328677\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1858\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1859\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.41905737704918034\n", + "T-Statistic: -3.9625676262993768\n", + "P-Value: 0.02870541916232176\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.34597457627118644\n", + "T-Statistic: -6.54645011829731\n", + "P-Value: 0.007246419253764158\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1859\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1860\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7377049180327869\n", + "Average of Other Ratios: 0.29166666666666663\n", + "T-Statistic: -14.478449297651643\n", + "P-Value: 0.0007143254209499869\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.2955508474576271\n", + "T-Statistic: -3.1245537578699243\n", + "P-Value: 0.05228616491106145\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1860\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1861\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5737704918032787\n", + "Average of Other Ratios: 0.38749999999999996\n", + "T-Statistic: -10.080682221460885\n", + "P-Value: 0.0020788624411748344\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.2823446327683616\n", + "T-Statistic: -3.3366481297841037\n", + "P-Value: 0.04449831598300532\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1861\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1862\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6721311475409836\n", + "Average of Other Ratios: 0.37916666666666665\n", + "T-Statistic: -13.36742611721058\n", + "P-Value: 0.0009049950751945303\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.3898305084745763\n", + "T-Statistic: -4.966017854713506\n", + "P-Value: 0.015682718436889013\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1862\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1863\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4918032786885246\n", + "Average of Other Ratios: 0.45\n", + "T-Statistic: -2.0479340472449516\n", + "P-Value: 0.13302782647013972\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.3248587570621469\n", + "T-Statistic: -3.666081619407081\n", + "P-Value: 0.03509583190764937\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1863\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1864\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.639344262295082\n", + "Average of Other Ratios: 0.42916666666666664\n", + "T-Statistic: -4.986422813713202\n", + "P-Value: 0.015507569745157936\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.35466101694915253\n", + "T-Statistic: -3.9441648534374134\n", + "P-Value: 0.029056249071256115\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1864\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1865\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.441712204007286\n", + "T-Statistic: -2.327272727272727\n", + "P-Value: 0.14541235094098032\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.43333333333333335\n", + "Average of Other Ratios: 0.36864406779661013\n", + "T-Statistic: -1.786828180527595\n", + "P-Value: 0.17193506795662192\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1865\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1866\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6833333333333333\n", + "Average of Other Ratios: 0.45997267759562843\n", + "T-Statistic: -3.7679579839771797\n", + "P-Value: 0.03271140841418524\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6440677966101694\n", + "Average of Other Ratios: 0.4304378531073446\n", + "T-Statistic: -11.862745098039216\n", + "P-Value: 0.001287998117223525\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1866\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1867\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.4976775956284153\n", + "T-Statistic: -5.300050196391964\n", + "P-Value: 0.013109777815587411\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.576271186440678\n", + "Average of Other Ratios: 0.43036723163841806\n", + "T-Statistic: -3.707083667393408\n", + "P-Value: 0.034110263406932406\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1867\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1868\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.8032786885245902\n", + "Average of Other Ratios: 0.4041666666666667\n", + "T-Statistic: -31.928961748633885\n", + "P-Value: 6.751260437384004e-05\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.379590395480226\n", + "T-Statistic: -3.550388805186171\n", + "P-Value: 0.03808013155197999\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1868\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1869\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.43333333333333335\n", + "Average of Other Ratios: 0.3358606557377049\n", + "T-Statistic: -2.42291804775351\n", + "P-Value: 0.09392318031625656\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.35\n", + "Average of Other Ratios: 0.2838983050847458\n", + "T-Statistic: -5.199999999999993\n", + "P-Value: 0.013818586905132142\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1869\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1870\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6333333333333333\n", + "Average of Other Ratios: 0.4194672131147541\n", + "T-Statistic: -3.4472491733646837\n", + "P-Value: 0.041018740078086716\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.3840395480225988\n", + "T-Statistic: -3.832436694700159\n", + "P-Value: 0.03130888812484261\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1870\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1871\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5737704918032787\n", + "Average of Other Ratios: 0.39583333333333337\n", + "T-Statistic: -4.526712258059946\n", + "P-Value: 0.02016683338787022\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3586864406779661\n", + "T-Statistic: -3.3503877475316646\n", + "P-Value: 0.04404617677686765\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1871\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1872\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.48101092896174863\n", + "T-Statistic: -3.9726503315641613\n", + "P-Value: 0.02851551151540454\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5423728813559322\n", + "Average of Other Ratios: 0.39244350282485874\n", + "T-Statistic: -9.21746279562317\n", + "P-Value: 0.0027010591887466347\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1872\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1873\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6333333333333333\n", + "Average of Other Ratios: 0.4441256830601093\n", + "T-Statistic: -9.737517564732535\n", + "P-Value: 0.00230079619793927\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3964689265536723\n", + "T-Statistic: -2.3349757965803564\n", + "P-Value: 0.10168572245232489\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1873\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1874\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7704918032786885\n", + "Average of Other Ratios: 0.38749999999999996\n", + "T-Statistic: -6.709762545561513\n", + "P-Value: 0.006755661879342611\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.40911016949152545\n", + "T-Statistic: -2.509542503119258\n", + "P-Value: 0.08697258350325465\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1874\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1875\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.48333333333333334\n", + "Average of Other Ratios: 0.3202185792349727\n", + "T-Statistic: -8.067567567567563\n", + "P-Value: 0.015019105066650768\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.3287429378531073\n", + "T-Statistic: -2.9086605723710677\n", + "P-Value: 0.06206405682692753\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1875\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1876\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5666666666666667\n", + "Average of Other Ratios: 0.4229508196721311\n", + "T-Statistic: -4.277954683338313\n", + "P-Value: 0.023458887518037853\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.3425141242937853\n", + "T-Statistic: -2.187920293054828\n", + "P-Value: 0.11648102879543593\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1876\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1877\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.4062841530054645\n", + "T-Statistic: -2.608245612118408\n", + "P-Value: 0.0798056461655953\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.36694915254237287\n", + "T-Statistic: -2.2961086998148015\n", + "P-Value: 0.10536377934120532\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1877\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1878\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.423155737704918\n", + "T-Statistic: -3.308126758133395\n", + "P-Value: 0.045455844114838176\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.4152542372881356\n", + "T-Statistic: -3.175846343890085\n", + "P-Value: 0.05025417308948302\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1878\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1879\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.8032786885245902\n", + "Average of Other Ratios: 0.2916666666666667\n", + "T-Statistic: -13.189427229234468\n", + "P-Value: 0.0009416245667257401\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3559322033898305\n", + "Average of Other Ratios: 0.29971751412429376\n", + "T-Statistic: -3.099929158257806\n", + "P-Value: 0.05329849219831802\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1879\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1880\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.48333333333333334\n", + "Average of Other Ratios: 0.40239071038251367\n", + "T-Statistic: -2.8278186890378114\n", + "P-Value: 0.06630888074205549\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.3305084745762712\n", + "T-Statistic: -6.072629199681584\n", + "P-Value: 0.008963729862100802\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1880\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1881\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.4270491803278688\n", + "T-Statistic: -2.1944780188324806\n", + "P-Value: 0.11576816528411227\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.31101694915254235\n", + "T-Statistic: -7.153846153846161\n", + "P-Value: 0.08841694696574594\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1881\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1882\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5666666666666667\n", + "Average of Other Ratios: 0.3773224043715847\n", + "T-Statistic: -3.575689868461351\n", + "P-Value: 0.03740060612655586\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.3038135593220339\n", + "T-Statistic: -7.40913346851445\n", + "P-Value: 0.005086235812941151\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1882\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1883\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.43333333333333335\n", + "T-Statistic: -6.573390283759843\n", + "P-Value: 0.007162305426399894\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4666666666666667\n", + "Average of Other Ratios: 0.4152542372881356\n", + "T-Statistic: -2.2929844695893116\n", + "P-Value: 0.10566636994498525\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1883\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1884\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7377049180327869\n", + "Average of Other Ratios: 0.425\n", + "T-Statistic: -7.134072833363263\n", + "P-Value: 0.005669714559524623\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5423728813559322\n", + "Average of Other Ratios: 0.37521186440677967\n", + "T-Statistic: -4.801612169541382\n", + "P-Value: 0.017192389686773397\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1884\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1885\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5573770491803278\n", + "Average of Other Ratios: 0.39999999999999997\n", + "T-Statistic: -5.171937592179927\n", + "P-Value: 0.014026340881299086\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.33785310734463275\n", + "T-Statistic: -3.9583928774422366\n", + "P-Value: 0.028784526660768115\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1885\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1886\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.4687158469945355\n", + "T-Statistic: -2.666321218691447\n", + "P-Value: 0.07592808657201537\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.3879237288135593\n", + "T-Statistic: -2.7149672381577274\n", + "P-Value: 0.07285814285484306\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1886\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1887\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
Recorded detection statistics, two runs per sample (highest match ratio, average of the other candidates' ratios, one-sample t-statistic, p-value, verdict at p < 0.05; values rounded to four decimals):

1886: run 1 highest 0.5500, others 0.4687, t -2.67, p 0.0759 (not significant); run 2 highest 0.4746, others 0.3879, t -2.71, p 0.0729 (not significant)
1887: run 1 highest 0.5833, others 0.4603, t -3.67, p 0.0350 (significant); run 2 highest 0.5833, others 0.3898, t -7.22, p 0.0055 (significant)
1888: run 1 highest 0.5333, others 0.4105, t -1.98, p 0.1415 (not significant); run 2 highest 0.4167, others 0.3686, t -5.92, p 0.0096 (significant)
1889: run 1 highest 0.6000, others 0.4400, t -4.33, p 0.0228 (significant); run 2 highest 0.4237, others 0.3988, t -1.88, p 0.2005 (not significant)
1890: run 1 highest 0.5167, others 0.4229, t -2.37, p 0.0984 (not significant); run 2 highest 0.4915, others 0.4302, t -2.97, p 0.0589 (not significant)
1891: run 1 highest 0.5902, others 0.4250, t -3.02, p 0.0566 (not significant); run 2 highest 0.5254, others 0.3336, t -4.66, p 0.0187 (significant)
1892: run 1 highest 0.6393, others 0.3792, t -5.28, p 0.0132 (significant); run 2 highest 0.4915, others 0.3710, t -3.24, p 0.0477 (significant)
1893: run 1 highest 0.5667, others 0.3689, t -2.76, p 0.0704 (not significant); run 2 highest 0.4333, others 0.3602, t -2.61, p 0.0795 (not significant)
1894: run 1 highest 0.6721, others 0.3250, t -16.55, p 0.0005 (significant); run 2 highest 0.4576, others 0.3294, t -4.44, p 0.0213 (significant)
1895: run 1 highest 0.5667, others 0.4313, t -5.49, p 0.0119 (significant); run 2 highest 0.4237, others 0.3544, t -7.72, p 0.0045 (significant)
1896: run 1 highest 0.6000, others 0.4478, t -4.94, p 0.0159 (significant); run 2 highest 0.4915, others 0.4177, t -6.26, p 0.0082 (significant)
1897: run 1 highest 0.6333, others 0.4023, t -6.57, p 0.0072 (significant); run 2 highest 0.4407, others 0.3501, t -4.14, p 0.0255 (significant)
1898: run 1 highest 0.5333, others 0.3775, t -10.30, p 0.0020 (significant); run 2 highest 0.4237, others 0.3080, t -8.19, p 0.0038 (significant)
1899: run 1 highest 0.5500, others 0.4236, t -2.57, p 0.0822 (not significant); run 2 highest 0.4407, others 0.3124, t -4.80, p 0.0173 (significant)
1900: run 1 highest 0.5333, others 0.4561, t -2.66, p 0.0766 (not significant); run 2 highest 0.5333, others 0.4025, t -3.01, p 0.0571 (not significant)
1901: run 1 highest 0.7049, others 0.3292, t -11.67, p 0.0014 (significant); run 2 highest 0.4000, others 0.3008, t -3.10, p 0.0533 (not significant)
1902: run 1 highest 0.5667, others 0.4109, t -6.42, p 0.0077 (significant); run 2 highest 0.4068, others 0.2956, t -3.86, p 0.0308 (significant)
1903: run 1 highest 0.7541, others 0.4208, t -15.21, p 0.0006 (significant); run 2 highest 0.4746, others 0.3715, t -4.78, p 0.0174 (significant)
1904: run 1 highest 0.5738, others 0.3625, t -3.60, p 0.0368 (significant); run 2 highest 0.4167, others 0.3220, t -3.79, p 0.0322 (significant)
1905: run 1 highest 0.7000, others 0.4196, t -5.85, p 0.0099 (significant); run 2 highest 0.4407, others 0.4008, t -2.49, p 0.0883 (not significant)
1906: run 1 highest 0.6333, others 0.4602, t -5.35, p 0.0128 (significant); run 2 highest 0.4915, others 0.4009, t -9.99, p 0.0021 (significant)
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6333333333333333\n", + "Average of Other Ratios: 0.4602459016393443\n", + "T-Statistic: -5.345769495574332\n", + "P-Value: 0.012801672818638945\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.4009180790960452\n", + "T-Statistic: -9.991472499692488\n", + "P-Value: 0.0021337257653321886\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1906\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1907\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.3818989071038251\n", + "T-Statistic: -15.322633755583363\n", + "P-Value: 0.0006037415969847308\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.36666666666666664\n", + "Average of Other Ratios: 0.3220338983050848\n", + "T-Statistic: -4.561067126598043\n", + "P-Value: 0.019760347058840888\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1907\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1908\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5573770491803278\n", + "Average of Other Ratios: 0.48750000000000004\n", + "T-Statistic: -3.044989012722217\n", + "P-Value: 0.055647600875460466\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.4262005649717514\n", + "T-Statistic: -6.726801374615161\n", + "P-Value: 0.006707009265801296\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1908\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1909\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5901639344262295\n", + "Average of Other Ratios: 0.45\n", + "T-Statistic: -3.5859661472271527\n", + "P-Value: 0.03712903167578981\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.34611581920903955\n", + "T-Statistic: -7.571218430433462\n", + "P-Value: 0.004779142305515517\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1909\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1910\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5666666666666667\n", + "Average of Other Ratios: 0.34822404371584703\n", + "T-Statistic: -4.589291871691855\n", + "P-Value: 0.019434311800169517\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.286864406779661\n", + "T-Statistic: -11.301306940901064\n", + "P-Value: 0.0014858566277749805\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1910\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1911\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.47540983606557374\n", + "Average of Other Ratios: 0.35833333333333334\n", + "T-Statistic: -3.1680031770414296\n", + "P-Value: 0.050558348943150866\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.32076271186440675\n", + "T-Statistic: -8.564496343017598\n", + "P-Value: 0.0033454209602583485\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1911\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1912\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5901639344262295\n", + "Average of Other Ratios: 0.3916666666666666\n", + "T-Statistic: -13.752294116926654\n", + "P-Value: 0.0008320313667330868\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.3628531073446328\n", + "T-Statistic: -4.591316366529474\n", + "P-Value: 0.019411195178700307\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1912\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1913\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.36038251366120216\n", + "T-Statistic: -4.510639994744203\n", + "P-Value: 0.02036072397240279\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.3208333333333333\n", + "T-Statistic: -10.424077229245048\n", + "P-Value: 0.0018843150010148248\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1913\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1914\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4918032786885246\n", + "Average of Other Ratios: 0.36666666666666664\n", + "T-Statistic: -2.214046423571898\n", + "P-Value: 0.1136718934082652\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.30508474576271183\n", + "T-Statistic: -3.4430305632479454\n", + "P-Value: 0.04114498062149175\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1914\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1915\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.47540983606557374\n", + "Average of Other Ratios: 0.44166666666666665\n", + "T-Statistic: -2.3377953522924186\n", + "P-Value: 0.10142500753372925\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.36709039548022593\n", + "T-Statistic: -9.92864680939292\n", + "P-Value: 0.0021735197350935946\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1915\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1916\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6833333333333333\n", + "Average of Other Ratios: 0.3445355191256831\n", + "T-Statistic: -8.421776890073554\n", + "P-Value: 0.0035127236241960158\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.3305084745762712\n", + "T-Statistic: -41.4537493278151\n", + "P-Value: 3.0893720674523154e-05\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1916\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1917\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.43196721311475406\n", + "T-Statistic: -3.1667548890956354\n", + "P-Value: 0.05060697458442003\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5423728813559322\n", + "Average of Other Ratios: 0.33785310734463275\n", + "T-Statistic: -7.8732869320554375\n", + "P-Value: 0.004269124399355101\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1917\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1918\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5666666666666667\n", + "Average of Other Ratios: 0.4644808743169399\n", + "T-Statistic: -2.238658218669735\n", + "P-Value: 0.11109977799644323\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5932203389830508\n", + "Average of Other Ratios: 0.38834745762711864\n", + "T-Statistic: -10.638370665053376\n", + "P-Value: 0.0017750090525035277\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1918\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1919\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.3859289617486339\n", + "T-Statistic: -4.12442499010612\n", + "P-Value: 0.025842773831512598\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4\n", + "Average of Other Ratios: 0.3389830508474576\n", + "T-Statistic: -3.9436024140371986\n", + "P-Value: 0.029067057994347754\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1919\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1920\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.3979508196721311\n", + "T-Statistic: -6.410996076998129\n", + "P-Value: 0.00768961910808135\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3209039548022599\n", + "T-Statistic: -5.521881967348604\n", + "P-Value: 0.011699587084326106\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1920\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1921\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.38579234972677595\n", + "T-Statistic: -3.247114142040317\n", + "P-Value: 0.04759348039328008\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.29964689265536726\n", + "T-Statistic: -5.367242638173358\n", + "P-Value: 0.01266022746944358\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1921\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1922\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7868852459016393\n", + "Average of Other Ratios: 0.4\n", + "T-Statistic: -32.828301710168866\n", + "P-Value: 6.212646695041301e-05\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.3714689265536723\n", + "T-Statistic: -5.21158171207224\n", + "P-Value: 0.013734015073651634\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1922\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1923\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5737704918032787\n", + "Average of Other Ratios: 0.4\n", + "T-Statistic: -9.652812843530201\n", + "P-Value: 0.002360364565844294\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.35035310734463276\n", + "T-Statistic: -4.9456020241512695\n", + "P-Value: 0.015860547909964277\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1923\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1924\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7213114754098361\n", + "Average of Other Ratios: 0.33749999999999997\n", + "T-Statistic: -13.159250585480095\n", + "P-Value: 0.0009480287588880445\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3728813559322034\n", + "Average of Other Ratios: 0.2750470809792844\n", + "T-Statistic: -4.254446720326273\n", + "P-Value: 0.05105369953469059\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1924\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1925\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.3958333333333333\n", + "T-Statistic: -3.9531674853921452\n", + "P-Value: 0.02888393838786599\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.34597457627118644\n", + "T-Statistic: -14.428366472211776\n", + "P-Value: 0.0007217047864811893\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1925\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1926\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.38750000000000007\n", + "T-Statistic: -15.392023937439065\n", + "P-Value: 0.0005956941961595111\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.3921610169491525\n", + "T-Statistic: -5.123685756910068\n", + "P-Value: 0.014393190365221242\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1926\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1927\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5409836065573771\n", + "Average of Other Ratios: 0.475\n", + "T-Statistic: -2.1418349190706736\n", + "P-Value: 0.12164197076622749\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3547316384180791\n", + "T-Statistic: -2.6999881415740097\n", + "P-Value: 0.07378677879364155\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1927\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1928\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.3730874316939891\n", + "T-Statistic: -3.9173359194644175\n", + "P-Value: 0.029577655575744665\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.31221751412429377\n", + "T-Statistic: -4.544899514360726\n", + "P-Value: 0.01995030538814641\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1928\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1929\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6885245901639344\n", + "Average of Other Ratios: 0.3583333333333334\n", + "T-Statistic: -6.697501094028931\n", + "P-Value: 0.006790960911751886\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.34246704331450095\n", + "T-Statistic: -4.8064516129032215\n", + "P-Value: 0.04066429179543451\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1929\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1930\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.4520491803278689\n", + "T-Statistic: -3.381086105792825\n", + "P-Value: 0.04305684351738099\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.559322033898305\n", + "Average of Other Ratios: 0.3800847457627119\n", + "T-Statistic: -3.055206987855407\n", + "P-Value: 0.05520098921136872\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1930\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1931\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.34419398907103826\n", + "T-Statistic: -5.4455564685402775\n", + "P-Value: 0.012161350131513373\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.35\n", + "Average of Other Ratios: 0.2754237288135593\n", + "T-Statistic: -2.66340767777208\n", + "P-Value: 0.07611699066362566\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1931\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1932\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.75\n", + "Average of Other Ratios: 0.4474726775956285\n", + "T-Statistic: -5.428996703057984\n", + "P-Value: 0.012264667663581029\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.4429378531073446\n", + "T-Statistic: -6.248223190659335\n", + "P-Value: 0.008270584465932308\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1932\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1933\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.38333333333333336\n", + "Average of Other Ratios: 0.3279371584699453\n", + "T-Statistic: -4.7611795122867155\n", + "P-Value: 0.017592360487795486\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.2992231638418079\n", + "T-Statistic: -3.1375946196106543\n", + "P-Value: 0.05175986194673748\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1933\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1934\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.39009562841530054\n", + "T-Statistic: -7.41768759066674\n", + "P-Value: 0.005069388475696104\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.33771186440677964\n", + "T-Statistic: -7.301351957262182\n", + "P-Value: 0.005304981348263639\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1934\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1935\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5081967213114754\n", + "Average of Other Ratios: 0.42083333333333334\n", + "T-Statistic: -1.9061102831594638\n", + "P-Value: 0.1527095539473995\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.3080508474576271\n", + "T-Statistic: -3.1525851210756604\n", + "P-Value: 0.051163101927519376\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1935\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1936\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.43333333333333335\n", + "Average of Other Ratios: 0.38586065573770495\n", + "T-Statistic: -2.611459754304495\n", + "P-Value: 0.07958474323908545\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3389830508474576\n", + "Average of Other Ratios: 0.29124293785310734\n", + "T-Statistic: -3.5699799446212253\n", + "P-Value: 0.03755260022285919\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1936\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1937\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
+ "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[Condensed results for items 1937-1957, two runs per item; the transformers weight-initialization warning shown above is emitted before every run. Columns: item, run, Highest Match Ratio, Average of Other Ratios, T-Statistic, P-Value, verdict]\n",
+ "1937 1 0.7666666666666667 0.39009562841530054 -41.93436311911947 2.9845043324434687e-05 significantly different\n",
+ "1937 2 0.423728813559322 0.38001412429378534 -2.034149230295387 0.13480390529326824 not significantly different\n",
+ "1938 1 0.6 0.40218579234972673 -4.703745486314339 0.01818156231348825 significantly different\n",
+ "1938 2 0.3898305084745763 0.3374293785310734 -2.3577069273967326 0.09960676963753387 not significantly different\n",
+ "1939 1 0.5901639344262295 0.375 -5.5469553767808515 0.011552909711436831 significantly different\n",
+ "1939 2 0.4576271186440678 0.3080508474576271 -17.7463880302814 0.0003901203293975093 significantly different\n",
+ "1940 1 0.5409836065573771 0.43333333333333335 -2.4127247774526692 0.09478502280607064 not significantly different\n",
+ "1940 2 0.4576271186440678 0.3704331450094162 -1.7365199955826325 0.22460571149425088 not significantly different\n",
+ "1941 1 0.65 0.3896174863387978 -6.0971062067672115 0.008862646607214728 significantly different\n",
+ "1941 2 0.4406779661016949 0.328954802259887 -5.202033883431729 0.013803686006307778 significantly different\n",
+ "1942 1 0.5245901639344263 0.4375 -2.361603838210462 0.09925556793924133 not significantly different\n",
+ "1942 2 0.4745762711864407 0.3628531073446327 -3.2057938752604676 0.04911378606623852 significantly different\n",
+ "1943 1 0.7377049180327869 0.4333333333333333 -7.354109955820557 0.0051963894030398975 significantly different\n",
+ "1943 2 0.4745762711864407 0.35444915254237286 -3.7842912965837407 0.032348636596183485 significantly different\n",
+ "1944 1 0.55 0.4151639344262295 -5.271696829221992 0.013305730606527047 significantly different\n",
+ "1944 2 0.5932203389830508 0.30007062146892655 -5.840104693319583 0.010003865123379863 significantly different\n",
+ "1945 1 0.6166666666666667 0.4810792349726776 -3.1931249674651547 0.04959217968663012 significantly different\n",
+ "1945 2 0.4745762711864407 0.3880414312617702 -2.3384829558815743 0.14430805754256285 not significantly different\n",
+ "1946 1 0.5901639344262295 0.42500000000000004 -4.904097017214922 0.01623024607177803 significantly different\n",
+ "1946 2 0.4067796610169492 0.37071563088512244 -2.2572802622847377 0.15257812860009795 not significantly different\n",
+ "1947 1 0.5 0.3939890710382513 -2.375708105544391 0.09799696855032386 not significantly different\n",
+ "1947 2 0.3898305084745763 0.358545197740113 -2.3319342552790907 0.10196787590396081 not significantly different\n",
+ "1948 1 0.6 0.448292349726776 -5.984088746615719 0.00934226482517408 significantly different\n",
+ "1948 2 0.423728813559322 0.3627824858757062 -1.9704993917573732 0.1433770749616963 not significantly different\n",
+ "1949 1 0.5081967213114754 0.42083333333333334 -2.548903055279766 0.08402331367690975 not significantly different\n",
+ "1949 2 0.4666666666666667 0.3813559322033898 -2.4415252920324195 0.09237446595146682 not significantly different\n",
+ "1950 1 0.6065573770491803 0.37499999999999994 -6.875471004283952 0.006301448294158944 significantly different\n",
+ "1950 2 0.4915254237288136 0.3713276836158192 -6.5420904956073125 0.007260153005952 significantly different\n",
+ "1951 1 0.6229508196721312 0.375 -8.04851009961339 0.004005903148525875 significantly different\n",
+ "1951 2 0.4406779661016949 0.3334745762711865 -3.470097292282929 0.04034351536878077 significantly different\n",
+ "1952 1 0.6166666666666667 0.43565573770491806 -8.810845426280132 0.00308060726714541 significantly different\n",
+ "1952 2 0.4745762711864407 0.3879237288135593 -2.1390171916328864 0.12196630264944257 not significantly different\n",
+ "1953 1 0.5901639344262295 0.44583333333333336 -3.786983637511313 0.03228933418227714 significantly different\n",
+ "1953 2 0.48333333333333334 0.40254237288135597 -2.8010960513215193 0.06779122416384437 not significantly different\n",
+ "1954 1 0.5333333333333333 0.4066256830601093 -3.9383341934243377 0.0291685537410846 significantly different\n",
+ "1954 2 0.423728813559322 0.379590395480226 -2.325606550774779 0.10255792463406842 not significantly different\n",
+ "1955 1 0.65 0.4560792349726776 -5.067243916956274 0.014838352365228525 significantly different\n",
+ "1955 2 0.4915254237288136 0.40433145009416194 -2.662987333476817 0.11681579707766619 not significantly different\n",
+ "1956 1 0.6 0.41058743169398904 -4.106153946552308 0.026146933323788254 significantly different\n",
+ "1956 2 0.4915254237288136 0.3373587570621469 -5.418875755811864 0.012328378409982957 significantly different\n",
+ "1957 1 0.6065573770491803 0.3666666666666667 -11.1491127146561 0.0015463681392427176 significantly different\n",
+ "1957 2 0.4406779661016949 0.32902542372881355 -5.097322089843262 0.014598923701575601 significantly different\n",
+ "Doing 1958\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n",
+ "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
+ "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n",
+ "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n",
+ "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5901639344262295\n", + "Average of Other Ratios: 0.4\n", + "T-Statistic: -10.563455564617959\n", + "P-Value: 0.0018122415508561155\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.38375706214689265\n", + "T-Statistic: -2.795942882520684\n", + "P-Value: 0.06808180891277789\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1958\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1959\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5409836065573771\n", + "Average of Other Ratios: 0.3625\n", + "T-Statistic: -5.013582256122774\n", + "P-Value: 0.015278375182884808\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3250706214689265\n", + "T-Statistic: -4.171340373881652\n", + "P-Value: 0.02508240388592301\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1959\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1960\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6557377049180327\n", + "Average of Other Ratios: 0.38333333333333336\n", + "T-Statistic: -4.064948814456438\n", + "P-Value: 0.026850007364746683\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.3246468926553673\n", + "T-Statistic: -6.228587468296307\n", + "P-Value: 0.008344518944513782\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1960\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1961\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.3569672131147541\n", + "T-Statistic: -7.573495790587439\n", + "P-Value: 0.004775004170929797\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.2838983050847458\n", + "T-Statistic: -8.87796045374059\n", + "P-Value: 0.0030132990718159855\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1961\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1962\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5573770491803278\n", + "Average of Other Ratios: 0.32500000000000007\n", + "T-Statistic: -3.590011324398241\n", + "P-Value: 0.03702281920404291\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.2824858757062147\n", + "T-Statistic: -4.417410272265133\n", + "P-Value: 0.021534399027710104\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1962\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1963\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7213114754098361\n", + "Average of Other Ratios: 0.4666666666666667\n", + "T-Statistic: -4.791779056643048\n", + "P-Value: 0.017288564339617386\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.39279661016949147\n", + "T-Statistic: -3.218304471414686\n", + "P-Value: 0.04864708009336623\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1963\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1964\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5666666666666667\n", + "Average of Other Ratios: 0.4355191256830601\n", + "T-Statistic: -2.7776540598466357\n", + "P-Value: 0.06912570353142605\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3543785310734463\n", + "T-Statistic: -3.815695684869742\n", + "P-Value: 0.03166549916342224\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1964\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1965\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5245901639344263\n", + "Average of Other Ratios: 0.325\n", + "T-Statistic: -7.221443843083816\n", + "P-Value: 0.005475227116046089\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.31228813559322033\n", + "T-Statistic: -11.326278432886252\n", + "P-Value: 0.0014762298056976652\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1965\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1966\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.4687841530054645\n", + "T-Statistic: -6.1047509028321505\n", + "P-Value: 0.008831382943901647\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.4092514124293785\n", + "T-Statistic: -3.953202519249618\n", + "P-Value: 0.028883270408863487\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1966\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1967\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6229508196721312\n", + "Average of Other Ratios: 0.4041666666666667\n", + "T-Statistic: -12.735108311334004\n", + "P-Value: 0.0010444936119927254\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.36264124293785305\n", + "T-Statistic: -2.925487770616238\n", + "P-Value: 0.06122360955751544\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1967\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1968\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.4458333333333333\n", + "T-Statistic: -4.993743185709583\n", + "P-Value: 0.015445355698632398\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.359322033898305\n", + "T-Statistic: -1.8552679320224146\n", + "P-Value: 0.20470801229945912\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1968\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1969\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6333333333333333\n", + "Average of Other Ratios: 0.43920765027322406\n", + "T-Statistic: -3.6522403422007157\n", + "P-Value: 0.0354367438599517\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.3965395480225989\n", + "T-Statistic: -9.383684565305357\n", + "P-Value: 0.002563738947888246\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1969\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1970\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6166666666666667\n", + "Average of Other Ratios: 0.3688524590163934\n", + "T-Statistic: -6.85232843082187\n", + "P-Value: 0.006362416633878472\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.3474576271186441\n", + "T-Statistic: -5.812651983124662\n", + "P-Value: 0.010136920657992399\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1970\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1971\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.38558743169398907\n", + "T-Statistic: -3.0561029466886156\n", + "P-Value: 0.05516204355542588\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.3081214689265537\n", + "T-Statistic: -3.056600083912935\n", + "P-Value: 0.05514044883654204\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1971\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1972\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5666666666666667\n", + "Average of Other Ratios: 0.360724043715847\n", + "T-Statistic: -8.874674345400543\n", + "P-Value: 0.0030165490767111924\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3456920903954802\n", + "T-Statistic: -2.7626457956106245\n", + "P-Value: 0.06999725904230607\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1972\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1973\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.38642987249544625\n", + "T-Statistic: -3.3493937263441476\n", + "P-Value: 0.07875291250488248\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4067796610169492\n", + "Average of Other Ratios: 0.3418079096045198\n", + "T-Statistic: -4.4263520637871325\n", + "P-Value: 0.021418091531717674\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1973\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1974\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6333333333333333\n", + "Average of Other Ratios: 0.4558743169398907\n", + "T-Statistic: -3.7027285641405947\n", + "P-Value: 0.03421325208693942\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5423728813559322\n", + "Average of Other Ratios: 0.37528248587570623\n", + "T-Statistic: -8.13082423873902\n", + "P-Value: 0.0038896389696946266\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1974\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1975\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45901639344262296\n", + "Average of Other Ratios: 0.35833333333333334\n", + "T-Statistic: -2.0822726427097917\n", + "P-Value: 0.12872281264009697\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.45\n", + "Average of Other Ratios: 0.3050847457627119\n", + "T-Statistic: -8.55\n", + "P-Value: 0.0033619268709983345\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1975\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1976\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7166666666666667\n", + "Average of Other Ratios: 0.42691256830601093\n", + "T-Statistic: -7.141009375063902\n", + "P-Value: 0.005653944218972081\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3840395480225989\n", + "T-Statistic: -6.277129309215832\n", + "P-Value: 0.00816330194750534\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1976\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1977\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.42704918032786887\n", + "T-Statistic: -3.8405509086761342\n", + "P-Value: 0.031137895819023056\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4166666666666667\n", + "Average of Other Ratios: 0.3771186440677966\n", + "T-Statistic: -1.974967724404075\n", + "P-Value: 0.14275464845123892\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1977\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1978\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.44808743169398907\n", + "T-Statistic: -2.524971074527996\n", + "P-Value: 0.08580160862644565\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.36730225988700566\n", + "T-Statistic: -2.6278882027424713\n", + "P-Value: 0.07846735545649752\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1978\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1979\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6721311475409836\n", + "Average of Other Ratios: 0.5\n", + "T-Statistic: -4.543656514596028\n", + "P-Value: 0.019965007637516786\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6440677966101694\n", + "Average of Other Ratios: 0.4809322033898305\n", + "T-Statistic: -5.744562646538028\n", + "P-Value: 0.010477088755276323\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1979\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1980\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6\n", + "Average of Other Ratios: 0.5059426229508197\n", + "T-Statistic: -2.819285590304159\n", + "P-Value: 0.06677779498737416\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4576271186440678\n", + "Average of Other Ratios: 0.4137005649717514\n", + "T-Statistic: -2.715044307099425\n", + "P-Value: 0.0728534025239453\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1980\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1981\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6229508196721312\n", + "Average of Other Ratios: 0.4125\n", + "T-Statistic: -5.2754090950771\n", + "P-Value: 0.01327985815124129\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.559322033898305\n", + "Average of Other Ratios: 0.2867937853107345\n", + "T-Statistic: -3.7484025543573445\n", + "P-Value: 0.03315263271031142\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1981\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1982\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5\n", + "Average of Other Ratios: 0.3941256830601093\n", + "T-Statistic: -9.09710470999564\n", + "P-Value: 0.002806625908000589\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.3584039548022599\n", + "T-Statistic: -3.4358556656684405\n", + "P-Value: 0.041360824576078994\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1982\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1983\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7049180327868853\n", + "Average of Other Ratios: 0.35833333333333334\n", + "T-Statistic: -9.378324457589393\n", + "P-Value: 0.002568020999171169\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.423728813559322\n", + "Average of Other Ratios: 0.32895480225988705\n", + "T-Statistic: -3.853459154520629\n", + "P-Value: 0.03086833989707754\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1983\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1984\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7049180327868853\n", + "Average of Other Ratios: 0.3125\n", + "T-Statistic: -10.826994016721436\n", + "P-Value: 0.0016856521372314135\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.43333333333333335\n", + "Average of Other Ratios: 0.3135593220338983\n", + "T-Statistic: -4.2613603366990604\n", + "P-Value: 0.0237024761862663\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1984\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1985\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6833333333333333\n", + "Average of Other Ratios: 0.49767759562841535\n", + "T-Statistic: -7.0718291988043145\n", + "P-Value: 0.005813854687167852\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6101694915254238\n", + "Average of Other Ratios: 0.4471751412429379\n", + "T-Statistic: -10.661446386757238\n", + "P-Value: 0.0017637451315993058\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1985\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1986\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5409836065573771\n", + "Average of Other Ratios: 0.38333333333333336\n", + "T-Statistic: -3.5333558289091114\n", + "P-Value: 0.0385464838592851\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3898305084745763\n", + "Average of Other Ratios: 0.354590395480226\n", + "T-Statistic: -2.6581313276202945\n", + "P-Value: 0.07646057355190569\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1986\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1987\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5833333333333334\n", + "Average of Other Ratios: 0.4810792349726776\n", + "T-Statistic: -2.348566398200449\n", + "P-Value: 0.10043648507703083\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5166666666666667\n", + "Average of Other Ratios: 0.42372881355932207\n", + "T-Statistic: -2.2081034216541404\n", + "P-Value: 0.11430368101220882\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1987\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1988\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5666666666666667\n", + "Average of Other Ratios: 0.4560109289617486\n", + "T-Statistic: -2.3723659709016056\n", + "P-Value: 0.09829344194941916\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3291666666666667\n", + "T-Statistic: -4.203856330713102\n", + "P-Value: 0.024572283074481203\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1988\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1989\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.41454918032786886\n", + "T-Statistic: -2.568978311749152\n", + "P-Value: 0.08256644079807948\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5254237288135594\n", + "Average of Other Ratios: 0.400635593220339\n", + "T-Statistic: -6.380724773314517\n", + "P-Value: 0.0077935062804952064\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1989\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1990\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.7049180327868853\n", + "Average of Other Ratios: 0.4083333333333333\n", + "T-Statistic: -6.76630498961627\n", + "P-Value: 0.006595971681668088\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5084745762711864\n", + "Average of Other Ratios: 0.35861581920903957\n", + "T-Statistic: -2.999030425264594\n", + "P-Value: 0.05771345344001608\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1990\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1991\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.43333333333333335\n", + "Average of Other Ratios: 0.35252732240437157\n", + "T-Statistic: -2.7722967072440508\n", + "P-Value: 0.06943525700132411\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.3728813559322034\n", + "Average of Other Ratios: 0.295409604519774\n", + "T-Statistic: -3.600128581629112\n", + "P-Value: 0.03675886811551069\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1991\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1992\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6333333333333333\n", + "Average of Other Ratios: 0.43968579234972677\n", + "T-Statistic: -6.462196948447438\n", + "P-Value: 0.0075180098331947\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.559322033898305\n", + "Average of Other Ratios: 0.40508474576271186\n", + "T-Statistic: -8.389086394729976\n", + "P-Value: 0.003552594693743783\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1992\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1993\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6229508196721312\n", + "Average of Other Ratios: 0.3958333333333333\n", + "T-Statistic: -4.368818491249194\n", + "P-Value: 0.02218094363638083\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.3540254237288135\n", + "T-Statistic: -2.4793636360304507\n", + "P-Value: 0.08932026186301084\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1993\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1994\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6666666666666666\n", + "Average of Other Ratios: 0.4734289617486339\n", + "T-Statistic: -4.251277919668526\n", + "P-Value: 0.023852073711438902\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4915254237288136\n", + "Average of Other Ratios: 0.42189265536723164\n", + "T-Statistic: -3.2416676813361778\n", + "P-Value: 0.04779043382080003\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1994\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1995\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5737704918032787\n", + "Average of Other Ratios: 0.43333333333333335\n", + "T-Statistic: -20.63996275066481\n", + "P-Value: 0.00024870591964064987\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3673728813559322\n", + "T-Statistic: -2.8797359104311173\n", + "P-Value: 0.06354272267077887\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1995\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1996\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5333333333333333\n", + "Average of Other Ratios: 0.40232240437158473\n", + "T-Statistic: -2.8318259683509934\n", + "P-Value: 0.06609008772343258\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4\n", + "Average of Other Ratios: 0.3474576271186441\n", + "T-Statistic: -2.978383660774621\n", + "P-Value: 0.05867285324441682\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1996\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1997\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5666666666666667\n", + "Average of Other Ratios: 0.410655737704918\n", + "T-Statistic: -3.6504320440032956\n", + "P-Value: 0.03548159461405821\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.3712570621468927\n", + "T-Statistic: -2.386590270664442\n", + "P-Value: 0.09703914637860833\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1997\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1998\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.55\n", + "Average of Other Ratios: 0.44795081967213113\n", + "T-Statistic: -2.5131484576925534\n", + "P-Value: 0.08669715647138371\n", + "The highest ratio is not significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4745762711864407\n", + "Average of Other Ratios: 0.4093926553672317\n", + "T-Statistic: -2.226010109872328\n", + "P-Value: 0.11241274845462867\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1998\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 1999\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.6065573770491803\n", + "Average of Other Ratios: 0.38333333333333336\n", + "T-Statistic: -4.2005187653190665\n", + "P-Value: 0.02462402343066429\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.4406779661016949\n", + "Average of Other Ratios: 0.38834745762711864\n", + "T-Statistic: -3.1550201885686455\n", + "P-Value: 0.051066984905793825\n", + "The highest ratio is not significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 1999\n", + "___________________________________________________________________________________________________________________________\n", + "___________________________________________________________________________________________________________________________\n", + "Doing 2000\n", + "___________________________________________________________________________________________________________________________\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.639344262295082\n", + "Average of Other Ratios: 0.4041666666666667\n", + "T-Statistic: -8.814856757089686\n", + "P-Value: 0.003076528811000136\n", + "The highest ratio is significantly different from the others.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n", + "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Highest Match Ratio: 0.5423728813559322\n", + "Average of Other Ratios: 0.37097457627118646\n", + "T-Statistic: -5.074685831579439\n", + "P-Value: 0.014778639324226026\n", + "The highest ratio is significantly different from the others.\n", + "___________________________________________________________________________________________________________________________\n", + "Done 2000\n", + "___________________________________________________________________________________________________________________________\n" + ] + } + ], + "source": [ + "list_of_significance = []\n", + "list_of_significance_watermarked = []\n", + "count_t = 0\n", + "for text in test_cases:\n", + " count_t+=1\n", + " print(\"___________________________________________________________________________________________________________________________\")\n", + " print(\"Doing\", count_t)\n", + " print(\"___________________________________________________________________________________________________________________________\")\n", + "\n", + " words_to_add = [\"example\", \"test\", \"random\", \"insert\"]\n", + " num_words_to_add = 5\n", + "\n", + " # modified_text = randomly_add_words(text, words_to_add, num_words_to_add)\n", + " modified_text = randomly_add_words(watermark_text(text, offset=0), words_to_add, num_words_to_add)\n", + " # print(\"Original Text:\")\n", + " # print(text)\n", + " # print(\"\\nModified Text:\")\n", + " # print(modified_text)\n", + "\n", + " match_ratios = watermark_text_and_calculate_matches(modified_text, max_offset=5)\n", + " # print(match_ratios)\n", + " list_of_significance_watermarked.append(check_significant_difference(match_ratios))\n", + "\n", + " match_ratios = watermark_text_and_calculate_matches(text, max_offset=5)\n", + " list_of_significance.append(check_significant_difference(match_ratios))\n", + "\n", + " print(\"___________________________________________________________________________________________________________________________\")\n", + " print(\"Done\", count_t, )\n", + " 
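# --- Illustrative sketch (not from the original notebook) ---------------------
# The helpers called in this loop (watermark_text, randomly_add_words,
# watermark_text_and_calculate_matches, check_significant_difference) are
# defined in earlier cells of the notebook.  The sketch below shows one way
# two of them could plausibly be written so that they produce the kind of
# output printed above: the best-matching offset is compared against the
# remaining offsets with a one-sample t-test at alpha = 0.05, and a
# (highest, average_of_others, t_statistic, p_value) tuple is returned for the
# DataFrame built in the next cell.  Treat all names and details here as
# assumptions, not the author's actual implementation.
import random
from scipy import stats

def randomly_add_words_sketch(text, words_to_add, num_words_to_add):
    """Insert num_words_to_add filler words at random positions in the text."""
    words = text.split()
    for _ in range(num_words_to_add):
        position = random.randint(0, len(words))  # position == len(words) appends
        words.insert(position, random.choice(words_to_add))
    return " ".join(words)

def check_significant_difference_sketch(match_ratios, alpha=0.05):
    """Compare the highest match ratio against the ratios of the other offsets."""
    highest = max(match_ratios)
    others = [r for r in match_ratios if r != highest]  # crude split; assumes a unique maximum
    average_others = sum(others) / len(others)
    # One-sample t-test of the other ratios against the highest ratio;
    # the statistic is negative whenever the others fall below the highest.
    t_stat, p_value = stats.ttest_1samp(others, highest)
    print("Highest Match Ratio:", highest)
    print("Average of Other Ratios:", average_others)
    print("T-Statistic:", t_stat)
    print("P-Value:", p_value)
    if p_value < alpha:
        print("The highest ratio is significantly different from the others.")
    else:
        print("The highest ratio is not significantly different from the others.")
    return highest, average_others, t_stat, p_value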
print(\"___________________________________________________________________________________________________________________________\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": 40, + "metadata": { + "id": "EHumpLgzZK0Z" + }, + "outputs": [], + "source": [ + "import pandas as pd\n", + "\n", + "df_significance = pd.DataFrame(list_of_significance, columns=['Highest Ratio', 'Average Others', 'T-Statistic', 'P-Value'])\n", + "df_significance_watermarked = pd.DataFrame(list_of_significance_watermarked, columns=['Highest Ratio', 'Average Others', 'T-Statistic', 'P-Value'])\n", + "\n", + "# Add a label column to distinguish between the two sets\n", + "df_significance['Label'] = 'Original'\n", + "df_significance_watermarked['Label'] = 'Watermarked'\n", + "\n", + "# Combine the DataFrames\n", + "combined_df = pd.concat([df_significance, df_significance_watermarked], ignore_index=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 41, + "metadata": { + "id": "I5Wm6PTHsOy-" + }, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
Highest RatioAverage OthersT-StatisticP-ValueLabel
00.2333330.182203-3.5327580.038563Original
10.2033900.139195-3.4405910.041218Original
20.3389830.270339-2.2286080.112142Original
30.2542370.168362-2.4516130.246559Original
40.2881360.210876-5.4675400.012026Original
..................
39950.5333330.402322-2.8318260.066090Watermarked
39960.5666670.410656-3.6504320.035482Watermarked
39970.5500000.447951-2.5131480.086697Watermarked
39980.6065570.383333-4.2005190.024624Watermarked
39990.6393440.404167-8.8148570.003077Watermarked
\n", + "

4000 rows × 5 columns

\n", + "
" + ], + "text/plain": [ + " Highest Ratio Average Others T-Statistic P-Value Label\n", + "0 0.233333 0.182203 -3.532758 0.038563 Original\n", + "1 0.203390 0.139195 -3.440591 0.041218 Original\n", + "2 0.338983 0.270339 -2.228608 0.112142 Original\n", + "3 0.254237 0.168362 -2.451613 0.246559 Original\n", + "4 0.288136 0.210876 -5.467540 0.012026 Original\n", + "... ... ... ... ... ...\n", + "3995 0.533333 0.402322 -2.831826 0.066090 Watermarked\n", + "3996 0.566667 0.410656 -3.650432 0.035482 Watermarked\n", + "3997 0.550000 0.447951 -2.513148 0.086697 Watermarked\n", + "3998 0.606557 0.383333 -4.200519 0.024624 Watermarked\n", + "3999 0.639344 0.404167 -8.814857 0.003077 Watermarked\n", + "\n", + "[4000 rows x 5 columns]" + ] + }, + "execution_count": 41, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "combined_df" + ] + }, + { + "cell_type": "code", + "execution_count": 43, + "metadata": { + "id": "FHbeKfN_se6m" + }, + "outputs": [], + "source": [ + "# combined_df.to_csv(\"Results.csv\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.13" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/papers/atharva_rasane/00_myst_template/Correlation_Matrix.png b/papers/atharva_rasane/00_myst_template/Correlation_Matrix.png new file mode 100644 index 0000000000000000000000000000000000000000..e2948665f96153e561eaec7bf36c3a0fdb713aa5 GIT binary patch literal 66996 zcmZs@2RK`A{5Gyb2TF(4fli8AC1%l5Rn#bABw|!;VpDsxXsc+esNLEUu_Yu%(Na}g zVnmFp89|E#G5#m|{k^~cdtGm?OH;1%oO7P%JfC&n_vgGa(9=A{a)E`3iRqNq!~2Fz zOh;OQAMF#zfWMUf$Pfg+4tX1Dsxg&zUs?cuIqGmv_Z|~dMFQ)N4KwijNsou--b_r~ zHyA&Mv<$DWGBNGvYu&$R{LFHRbliH(cX$7fKUJ?PEiY(AS17l7N%`rko9d5OV_t9i z?~J{a@6Be_ILgIox1!8aUBW#-9)wwVm?#^Ruut3QfN7~iyxbBkH&q0-r~dAa(}L=7 z7HL5wy7bix%8WZ?3QxFyO#Bezf2*fjBpKgd{7(Qg06r5}*p4v1gJUGw7~iXW_KC-s zf**N$R-i2E`o}}*Zc1xErQJ4Hr?>?K1Zt`CPL7V{m6f-HW>Xz*iHmm&>ZU#2rPDYp zVz2#ayXZcQb|aRUmRb63FBBf99%c&vRFoZX*y;$AT3*jNQBhI1&SWvPeb)tOjdK#p zvSE}^X5}6b5Fl=>{p3lG#{K)XWlGPLgQ&ygvL<-b&WwcH`h1`4v(?E~_SPFWZX{=C zyK87@B)@~Uu{EP@tZfxv-fxA(0&vMEu`fJ8`%L9yyXS0c6^6|V!A0h`uh9B zJr#|OpMzl05sd4=MlqH^Fk2n?2q6RR=UUCZ9D=CKOyBegjd zW^xa+GBHKY3!Y+%5jMjcGX+jY9A6Sr-Fg*5aj~ib?rN zRC|(*H$s8C8H?fTLXO8E3kuqzruO$}P1HU53e?8NW-_gJr)PgBVRxe}%r}LJX>uCa zTOPceuBtn$>O4N+^E*FG8-IyJB2HL^Zq4}3B$p=!2Mks zaZYtVz?6?)>dA|5ZEeTm9dnq9;HA#Z(`e! 
zdRS~YRCmf*<5=u2;E?b-#3Qn7jXp*yi0Uf$`2FF**{*b$?_HPz(TCL*%-SK-HOqCyy{J{-P`@1W+_JZZf zh9H_Bez+%iR(jcwQ*WalsnpQ+j+|j_hW9R*vs4uzSt)0)&309V?b9cg75df`?0@po zs``3hH1(czxPAh7F2n1G@Ukbu3q{W}2>sh3gbOeAY{#lLpR6`^B9QfLhkKM(>W^fp z?%o0ESxoI^cz1jD7|DmqA7c8_*K!mB?5jaB>Ox_*U$c>I6qh!aHPjIN__O8}5ndVp z<*@}pXg{xU7op^G?x(zhSG7L%Z9T`_^&?h;Cg@qLsyIK2*S&d?|AcSE9o}19++1Uv zA36PIyEDmZpCgfxacb|o!sAU^P(&kZ@<>%7Jqw$iwyvERQInwy zncAC4u|l7hwy*v1Y}L=linHxLDeF0xAs=#1o0f|}{D@MHf-*}mR~8!;VoxJaR?BFC z*G^V*u7ar44xwzH2qPMprj-a;pss)P$==xHn&%A6e)j9+&biVN3i(*3kS-);WD?v2mS4_8P2{rc)0>?YUl&bm#wr3BWJ zrmowQ*=;IGIDR7VlA?*t=AU)}e^TzocK=g*MZE;~FX4y8#}-3(rv>&src7w+#O;8a zuH+-^%#OZQaD|BEtm7oKiu}TO1CHJp7MF1lR^V+oc0the zkg$tJ*bmKDx|#lT&IYk8}_onn~bgwXYCbSkFFZ7w^k zQEAqCJ;W!QYn-#RX*Eiswmlb79+|DuiMIuR!`)Ek*p|FI1tATBDw0GNM#~l*ia1@l z`oVbcs_{cE<{)J_NeEUJKJmW0@A&0Fy>1n{D=paRJt*YMCwkd=9N1~7M?b+R=?OU1 z7YcD+?6lhZyEpfUSX^AZ?6*AE0k(N$1D(*1(6lkT0zL$5et1utcpS+l`gFmsM}C^k zzX=>+Y-&l8AaP{ozAnY8BA*nqU=Al!kuH_kbL60+mg3ST7SF(+2s6x*Ac}*Ha}RW8 z1bCJ+joFPDL^8|kwE9O}ZX`@b#wAlNe9wK@_ZW7*#jGPyzRG&c{$LoFBG=W8v9R~E zNj6=AsFmWf8mk1Wo|waVh@ou{wVvnbjgM)j(Ho<^wg#Kox9wijQ|DC{Di-IE9;Mnf zS?5A7=de_$oal>T3H+(3&*cLvn=|T3RI660H~2F2%r}Y*_*Z;LZO%|!d|!;%cJBjH z>+>X=T{g~wjSB<{S!!Y9&myu6TQKxo<>|>iS0<)SncUlKOjiuw8k{ilf5QCAK<#HO+AtF1)Paa90ni5g6cw-d@BA@Jol*Q z`7-iwC3=IGLpF7&dH&A=eL2WyKj7+Fm1$qXVaoOZ#Qsxqkcik5sE1++h~3+ zLdS}Q-2Ak=ojO0qy@TEIE?CLm&CyX7SUgt7aeBQb(tmY~;Mm0fe7Q`;thd&q>lX?> z7HA2sVUM>u#xyw)-{i-1`YJPX6iZaGfrGQjLt|c}kH%(39w!a0jX;mDXyMzFujt=1 z;_9`Q8%gFA=i20~kFBCNq6K!)MdWv2A$~n@y#!NC z{;P_?E|WQz_hO{vHmdVjxymgAF4@63!H09gu8XpfTt>?1Nn1mC<8V_Zz}iLpI?vaD zrG~um8Cj~#wj@Cgf^z(CS5t_}>H`b=;|L`@OhjNXM6j z&R|iZN6CMV;VdypjoDcOQNspU8M1-_7Zg^pVP?Eb@J5OcZkOk6xtwJR*Ovy;X9G;GGx~6SJS%7z#IvKqGQ$!O zI3~6rwOFx8FVdd4&4$!Q%vC64=FXY5FV>v`u^}Uoi{*&zz@LjEWyHrdjWa!!3cAku zwm=}jen%=*3f>*?E~pqo7?SG3Y-Cn3b4v8ge+5Q(^a{4;B2b7n{b_aJH0KqeV_F1Gk0 zwrx3-hMTA%x;K%O9n!u&!W0wPH&hlfW(9()lNu9Lcpv*h0ei0JdI{o(s7|-;)*WK% z9XTC+Xi~Ds1<__HatU!2QHpi@%#wj+;Pp|p{HHJC1lX! zq_&UF(ROiH+(Pv{vLmzN2l5_kYQr2svnJ9kqF=gFrQ4Y$qBNtXtp_{ohx; z#6PYzDH_UGLmn=%_cBQ#C;JdtW&$nTiWL{poIki^xa2`vCTrf$6I~Y6Pxw;6pU5}2 zEz^3*e>)}orRB+>RjmnCUUB>la{-T|+0t?TCN=WIHFq8+SPhAuCU1O(PiiJ6&-O2o z(&m?dTCdtYgbanFof}kSTrgP^>J2~u+pNCKn3+KhxbviwpcQy;Z>uK^>sg(aWct}- zd8`h;Z~=7)N#>TYToP$Yd!Yq;$MsSspx-t@c{Z7&|Ap}d{q3cy9_;CoKRA{yP3>lF zu)WxBu2dN|3{v^f)~x3G+vK^v(YAeHla29OgP|M`Xy!ECBs0_+GQ7+KHIuT8TT$VG zfIHl@rXipQV6eLwEqCsLtsPe*B=>Ytcq!-uxPw6JT`QW?n$YPPk?y%OeVEA1V=)=Z z&8I=r>1hTHSxhIp8S*UXBecoSLU=z7g(J!Wm)F+_YbYJ%vM$kPz+;;`HK{daNidr! 
zJFt`o^6`l?dnhL?&X(F`Rl6nw4yX90Z_p4~T}OF=4%F06nt-Xbe)^QN`f2eK7KhkI zzAd<0^6LE_P;L&Zb145lTw!lU_}Ru%z!C*n^V?ouD80J5+RA~u%qUlIwxHDaSbkA9 z@^J3Gdpa;1t}(Eu@ri+u!4M$F*e5Ty4(O{=5JbdNfj5Gt{eSg=-5*qIpFnV%501J+ zLSxTh@(|T3Oa^xAeVPKk6VSPvzxiLcl&ht+d~O?C3@{vsIhM8y8CsAk*74QspJwwD zb=NtaEol>(?gkN%IjSnt*JHi6vn4*fSko7LtCqivx!fcn#j2+(^xtJ>${L>2tm8& z9d#j4S#vx5m-YIcg+Jub?6AEp_?yk6rw2NH;#@H0NYwr}BbXX6{PiWFftD+NYjByZe+1e`YR`O~pP$ zm81$=yhIst6LEEde%7V0Z$=){^?_n_Nhy=VP-T6^2ko+DMaNcB(%_~ELYuo9m|XHY zFXvTgP<+||Z6JgYpxC&z>#V!#`EohEG0b39YTj@{b+d|)znnO)z#8q(mpi9D1Pbe3 zU&F6)2j3d?FK^G`(DEsT8(~mHHgWU(_MDI|rzW@&O?op#WkKOE(b_a+ z{Ngb&Ft7$Nsfv*4ixcJVu4#^{?)~8i0;(p9TDSy&{s5mZHjhnbKdZ-JeODwJ)(tPJ z1V5Wh(=Pz+);!q9`jTDy^R)c7W>SbZQY@bESU&p<{k77{p*q&25*7rwYv2Uo3B{4f ziYCNgwCiYC5ldMcm7V|2aDiT?vu)d(ijtC&FQ9DKo-Acqhwe*5To4!nd}f)1!{}zn z_vxu@Jtfu$RH3YzR6N%Ec&N6~WSZn?$)|EzA10OS1g}h ztd2F*6F~f+qe7s2L@oVadD@X zl>wT$g3w!VNkQP~Z%lf@}PFAMcdF{${)Q(Xb7qRn3`e4{CQ8{ZPbBptyBplz_XF z$5kCu!uDHK*xYw^Roz-wR#;sR?aA!K8r`ye2DfHi6=0rP>EU;v*Xw*KyPBJGOK`9E zTpxg{Wh<)=mDYapZWJ;+y|=m~f$j7#8j~1G&XWDHN1xcoo-B!cpO{Di>Ik$;XZu8dW^6Z`8LT>ESpHI*w z>#1{L6~34WuHnNwPJ_BeVlFw6`k#lNE!3?pQQa$Z8?El}jbraWtGTs{^vg_4@J$6^9p!a-y*VTkuOV0hM(NPEnT^{M`VejllzWuC0 zaK7J5dsM>7$*J^j$xLE(Rym>>5@goil`hxQ^@!9EME8vq`e(rGu|S$e>OcF?WvH|! zGV-w8tUsMff$vVb5z%$znn(QrRO&z<=?O)LQ6}LeY+dOv02)()fJ1aCYp{>ykw6P& z2lw0m{5VCp#Se1+poCTivPWQMpE8T7qY$v{t9<0~O6L8Xy9Lrr^l-L!mzU;-qwa-OpV^T-r2i~2V<83C>5&-HE$laD>wg@rmirdDN(XiozvQx9 zk^4lWtWC>NIVugXR-dV@---I@Eaf098EigYbZ{OhvYp9~fGLq(zO3K44+rRf4d5wE3y1sqQ!Me#YbMJ(F`4 zx>6AQhWJkJ6W$yc#`x?1I>-MWdbqt?{L42dTDfbVtNe4E;q0f`vTTN~@=7022afvi z<|#>6mHP)?HJr+Q#^y}lxJI>jD{>3Ob@t%2UwVrEJ;O{86hT8r-U@MsR?*4bqf~@a|7WD1(wF)@aD)hfAv48qgC(apT?oI2A z`M+t6yPA%V-L(FG5CxcvL| zF8%Sg*KU3*QYu}Bu_T{DZu%T+y#MT<%e;HsAmukfuw0SQ<(ovk^8e;l?rIvi|MqZI z0ctJa1mWzH$tElVvzq_TAHMS6t~=sLnDuZim{tGpHcl+YUyAQ4{GYif==vZanRS8fTO5~ENB?Pk6i^A|`-@iZf&y4S@V2ogrI6>Y&qnkAB;T+mE ze^=p=(#p1$Sii)ELgv6{|lat&;aFq*Y%qx~y_;zP~Kp!Xd^h6W0BtF#gYNB+)qlKx##ABa$41ygTZ%VlI` zvCF;AVoup2t%?s$F;2PD&1KWDIXHUMq#e5uy7l-whmxRR+k^N^PAd(xtcG1G5w4Yg z88Fq5Du*7w_W6TZGqvb`w~?2U>Qe#y2R1cFAZSVIAmA|2OqcC#2ADduz{>{^L%#C~d13Hfx`O z`)xZ~SXf9jZT!ncpOFZ2n%G_Tr+8=6Gh8O<HU9pV`|@F2->ItE zfrUH{CDh_c1|FRW;OGo01zRwZAGAJyM*cA4BH^op1(G`eFN4c}zqq(q0zlE00W(~f z_|>W&6&H#6)yRg;U$LbNE|dXZAO&NL3187D`oz2N`t=l{e*frlUQ@L&o6DWm9F5o# zfawtt6{P^PtIz}>#<#kk%?^ez9gdum&Pda*5oU9E`qa$X89}?FQS13dK|z54kZJ1Z zr6t$4e`eldct2NY)i>R7zcs?b_oG#nlvT`L+{F%{A zVHbP{j5{xol!4Si-v*o!vWt2J!crEJ2crVpN#JfO{VvLN5b zFA5oufv}yzE}P9Rxru54zzC@f21Z*)hXU*{O!n@Q1Kg^dk-e0w?o4rr%V!77rgHaO z=U|-UG-2m_R6{$7Fp|duP&#XKJ(!%FtGMZSr6uX?tv7LeN`zm(RMJL_k|wLGWq?J< zj^mYTd17lzd3fAbI$h?AYd=$!){!>X{_w)89>Ip?*^6U#wOX0MksCNl{Fmgg>bl!^ zo_;@#o6m_|dM#S&VY3;15nj#!Kq;)~wI0=p0{v{bdJMbom65Wl_Xp9U1)ARK;pJ61 zGdl}e8E>FOvWl1YIx#MU0U1ezZFR!`mN2q!KL#HFtS(@J_o0k|lR@I%&aw-uuAzX7A#ArWDpj|( zH|OD7c|D=js^oX?Y$mn|7>g4sz~$Y5j9L78VP4+R?gPWR9Q@)^G>H3IcZP!B&YyN7 z*c4_r-4^GqbJNM*Nw+j#WviILGc`2+8;bE`$i3T0}S;JZd1)sDgIGBy(Y zDysSUaU8-!O)V1oK+E*f0T2m)+vvHTZ1)tH zUn*JUz>|i~1zrzY5WOvObt!E*MIdUYoXbOXEw!h%T*+tN0AScL1A0nb%UH5FZi4`& zP>JsT6oB^-{2vzpTn(0uXU1wK@WV?y9zA-KE>Heor003XTvu9Wg@{S`myDk79evb2 z)ywcgpPPW?#w$Z?qwq41ljl!9($hodA|E*JB&^zs4lm09q1PBl$QByg0k(W z&@nbqlYkvSaegSMgPGgRk0dz3QS9@FloW@R`n7m~^$VeO@#m>d885TyWU7^W=P>Us5Vb69 zw-a^mz&#r<%l2Qr+FI79yM68_kgAS9n?A+x&j2zu;U)bLd3C2(onxdYY|mQ`SuMU& zJIyXvmmF{F1Smn6m`?GqkYC$j{F=2XGh*E;AirkUQgGrUceZl!Tj43Dii0 zn*H`iElVvI7ng~N$!uCn1=ZXTLe--Q!CESlkL^S>q@RE9()5gP_x-asR;!hurk_&x{gM zWq+#&*XcRkHZojc3%l94h(Nmj{oO&Q6)%t(MP2D2<7(lOvjZ}h`i9#Ye=;*Om)3xN 
ze5yAn#32~}s~Xk)SQKuz6sVr`bKz;u=DE1zXr_JPlGjybO`ty5;ZUyge0CLo{zQm< z$u6y{#=*SJ=YsW(F=f5zoym*Kx5ut0WIs`E^oLq^s*YNm@BZVNVrk&G~DfBo`J^~ZJb zOH{|le^O9hG1JLEy>&_Ajvr76Sr^^S&sT7BaoKPsZJ=#UlPEuE zmSvz*5xJ0*w6kssuxXTwea5%;fHK<^J1cT#Raq^8^x&};3M3=bfO@DCS8?S?VuSY+ z_GgLtWdD`o$}XsANp-z=IgeX2Fb{qBko-!`B^OxzP_<>Y-MEV&j>tLT|3$cPYhhqodimwdeu+cQSj~K zc?x!91VV&}^Dnz`{kmBmPig%9DWbRAijwycsH6KkslMdfPu|X0`X&3)f0`!;b(j-& z`>Rl{qzA}RbmwBH=wZ#AYcr3QY+oq|M~J;ZAkgE<%QwqSP2e>{Chj#Hm?boq5-X|% zOE8>%f^zZtwI@k@KaN;J7NPS4qNsm=h*1gKcjz8#aOiAiS$TS9hBeISteD8{7CNoR z!qKwMrA&$H2Dla36|Xrs(bT!2KM@ulzcJo>RVK-|kNS6(fB9^TzI$83#Y}sL>@?c} zy`PmQa|ar}q&@MhcxUoEiitTwePKYfPcQ9*Q(^WPUNBx4Q*mv;b@Q#UgoRM;L=wsJ zQz_EL!-``m#Z@l&b9SbJa(mmG`6syzvT)ulAJuczdNySZf{>;=iq=d7_$Hr zT4FHl;BwP`C}z)N=S*m%7VomznQ?)UuZ_xLilC7nCMXP|9!9VmJ3(A)3o@w)WwqJl zy}~AtI-^7zt?HB=iEepyJU6p=dI)^av|P^q1HFj-((OI`grYp=cp zX;K8MxZm<_cWE<}R1#*wqoec2_mHU#gv*vRn>`MBQv65CkGeYUS3zsg2hPA@jXkr)i4WD;^K9d1v?QDyeI@;n9tJYPJx1^jwb{ zX2_m`9%@*r^DwWVh#~%jHXW9GBMVHjZgcaY(#+-~C0pkLt|e5BgH31~O2uJv>5I%pzNU>|SgUOVeQFwJ@TG9$h+?jAq1zW%!`TpYt=HDO1VO*ZdYK3V$XBHui9ail27y4HDKFu zS5D}*n~XqLR%*<>sewbLwKgYLF$DT&zM!eVrcofdE>{Dz2O1htGjnrLEv;8z6BBGm za{&d&n2FU+iFC!EG1qdr;!eexm|8f418}IoIew&sFssE8AH9uJr_gofgJ3w9rS#cp zu`7b3UWVsT?hjty`1*jjxD#R$^!6-%RQVgc-qY1M4>jJwctBs-siBS{Q0Ne=;)@Q!<;%l&gZex5Y5~n~xVcM; zY^Ell+KEG%h48+b@s)_SUYFW?4YXL}MN+}I{;QTUkzVE?m$5&=Cyu^Ik=+MDDxbdE zr8Vu>Ffc%7(nys9@mj*2UsRAghw}PcG0Xbl2XA<7q@-f9J{)5jO;mYnq%j;dGXIdA z_WpEj!NR%T>KF1laj_MdH@N6RyG~k(aEdU}Yx+5lYp1`Bzxut3u94~i`_j%T-KcLS zJSY02xWH&U&T_(iAzsaYELvGZ*4?G?qI!LC`BxE_^VeR*7a~xCbS=F%XqJ^z0r#df z6R#|G1$a84Slbp`&X+jyy*XSeIHkC?c}vL_g3bgfw;$Wk@x%Y>BK{yzUba*U&h*Pa zW%l&VCk)$UN{9NS`Tkkv=zaHuGa7dvsuui#hAr_HSUQTwgGR@5AT-RqL|DjK-3)vc zv_8{IE&5_m@V3_ZW#2}7^(e|Pm=g8tOT=BB?cW9x)gCUo`+qWJW2R{1K|KuIlL%;h z?0^rog;io0D6)bu62qZu=@Ye*B^_1bjhj#8B21tQy2!TK8rRVt2S+CMm_15SQL(=b zNbq+5FX)>Zqq~Gn((6TtOWNPLX>Fv-*l$yujwnhKQVCP`dtYbLAk+J;{3pbxPVe`$ZmIZCY2g}hX)Zs&7+wk2K_o;VN{gQ!N_>! z=B~w(Pe^BY5|45Ez?c-uyLho_ls8u0LEmgR>^92+1@?I(@ z!7S6CURhS#MhW`0b%##Ng_33vHTi@goN7wI9Ci8KyqaP!|2kyozPYm3H0*|%n4A(k zOybxsdr=P2B8e7s8D6li|H<|kO{W?u{-Z|1qMsCZj}U{f|e_kmTOxXb@33)Ka&I`(2TktpfAeX`GbBkxbsl})?3(p zjIX@oc5W#e^@&su#t+u!Ycoxn^{T2~m{XDwBC)nSC(U>^U`NlZ2abf7h*Pt8yucys zuM=LOn_&a>m+yRuli;7@3tF7ak{!oo;BHK?oM?4)K$tnT%?pg(`MQj?Epchu?t9qz zP}SpAZYvlDbH9lBFtSf-AcX#j!BZ#NF%VCuxRAX{OG24{&bM!%xbxy;L!8l|kz|p! z#kAdRr7i03*Z|pb_e!4VbGL-wNv%5yEV76E{oq62pdBC0hV%UzJ!QJ#b7jl1e$F&y zE-rujNWb-82F5uo> zAEgVb#Vo0fg#0#h_$Xw#QEx5shz&1A!!`m$TvOgB!TraZ@HGyWBud-rA}HhZgiISo zfs%-RT$uI zmPO>8l9M$J{rfJ<6~EsMnF3TZ`TA98t(G)YoZ21rjtt%bVwFg^1c1T^2BHXB1*v+?0kdiu+oWxJIBu3HM z8^-F{b2kd%4TC@>V88W2VYJO~8tNO~3_qRl+bHi{ryLNM0 zhP=*3Xqd6Co}Md2uQ&#y&t-!WzLarSr%CC8RYnFUWtaaxBoD)P5lOi@^rYc*w?p2S zigO#%`jceY$4mra2553f7T}I&0Te3?h+1wxKT0KMW>N^}Et^6cbhA|&x)L21{03_v z(gt$w8Y|tHkd>dTm!hZz@Uwlz!pVyEMfq$Ot&69lq1zukfl56^qwPNQ1JCOXo&rBB z&q+3)7a6ZYZb=n-i%SZy=Uls19}bx%uqIb7DyBC8n&;}YvBjLe-7zm6iCYs(gi9tK zgkfu~xCGzN{*thOkjNV!I63=qUq?HPDofIN z*f?KI1Un?K_TR=6%Tlw!4J2$sKSe-61~2%`#^j0_xMpnW*+qrx__jFhH8uB=hMie) zVzTMNu5v>4Uda2UuUe^a5?)5aW1b$r5uYAxQQ$Ua2ww>`aK!ypO~YTveiW9Qt9B_m zn!A3o_|qL%>-V(n1i!>uJBPNBVj=rypFuysZ+XgY`L=(*F=tPFc~1+jkWW!cw=5IV zd+2?|{EC@vB7We>(o5ruDmO<^$lckchUMp1c9(BT(tgIRh0dKtx%V7hp$p1|n9cAN zUM`(zO-p-_Y{Am}aZ0$<_dy&OC_Jbx09^C+H?{nChtP5K&b8l?B^yzo-1YMg88)`< z@eD#ZXajEwd%Mffkvn7s6d+F^)pqK7Lb?IQ{)hvARtsisbliXT7`k_y^O?~Dw#qA* zu+D8#Fe*P=ay;kPbV)4F>#CmAS(x1LqA0TD*?9k#^YbTOOkK+@?S$1EAa5jCT2y>P z(UI0lb^4{SJJ_a~0>L_#zd;xMYVDG-f@9 z-;u)E(Ua$HmpIUl5-X5D_`_6sl1p#->4*xm(H7r1Iy~w@8#SzWZg-5xhCWKW@l}(! 
z^G$UrrM#Gon~hgPyTgyA=Z7lVoOKIV)ayizt$rD zEe1IXWV9@jxzPo+-g-(MblArr3)(80Q?4cjK4Q4_|EO}FoSr339pSrq#W z6Kld*2W4D4@#OS?Tg$vLCL@s`2k(@(lUZ1@sa+zSCz!$Px*WXNW4OstQ`ZHer(U)h z9HR#MQAX_wGrV4h(1{a%wt;f+v3dKC{BrON)~564&r=!7!4UVRU9>NmAm%YUWlmpf z8paY=SX@7o0-D(RI$k9YmlbLh2Jbw7gZ;~b?2xEN@NGpX&!6}|prMDni1XsuQ4>Jc zb^-Mkx!kkx$`gL8%+Wj#feqAo73&!x|1i-t$pruG5Vk*#FaFI-pTs8%7E!-ur%Y^L z=^$N56%eludQ#~pq%8nY(I9CsTaKyBgV0>Gb`;eUoo(?Z?p`*Nyw|WF;xfbH`JWgU(}RSR=Wz ze(HBONDywAu6$G6X}kR4bR77PZ<*VQVguU63DEj^RWVbUO~$XK?d3AgcqhW7o|do*P(N+}I`3=a-K{YtQV1x@J|6aWbdJaAuS*E zk9dA3P+O&!^-%Qfaqr7;*jp zzxFr>aN9^^6(DKBovh@4HhCy$cVmdMHJ9zLb87*+vlC1Q7HzCwJMGZ{gA{%+0D-6k zI*o?w1J-TmPk{zQRkmG>$nAFgdkdAU-9cym=^b-?AgQuJUI2Jv6 zmwTu!gDKwyT288mEQ7i>F0pqD`+Gg1fB}y(Do7lVYq>Q? zGQ*$2gA|tF{Xi=z254kEMh(4wn>Q*F-^N&$2j0Vi~Z`%E$Sc|XcS?bfu1Ta0EfVWWa!KqG7| zl=UnJYHu1Ld*jBpsF50%p=0qXZ&`|52qm5xgoS}Z;)|2#EI6K$+Jp-;o&be%>Ef8B znjW&EqC(bh$qZWYBuWfY{XIXJ`6)O8{3~=W)8BQWKVR0V|GNJz#uP8V;j`y9p*e7= zr$|`A+xh%6$jTYX$+IlEh_f6QWq-z9?C$Jj&G7v6@gpz5yZL`M)lD_*Ttm>SSxSJ; zBSOAT%9lP(D}Zn#eUiY=Umb!$o@am_Cok8koO-s}477Ifzfj@oJy1`COA;L6<`z-C z$+~n7*&Xl@5{q-6P%$OxYHPDt3~>5z#V2w3hPZI13F&9K0-b+o04l-ZUo%9@wOX}4 z@gm>UquEYg3R#Z=s8N2ido)0vhCQR&xD>jq?spC+)he;C*?hD?x z9U2B)nRFDw@~xR1$WT0N&jVyV`ChS42tx-E(Veypk&fE^o6_Xt51}^#9bb}`2XeJE zBW}Xxn4}A_{tI#>y+>j05|D#2lx`4hj4~$|SKW;d#%6d)=!~}Rr;Vn4dJ{Tu!`bwT z$wRnAOr}{)#`|r5q$^5j@uzf8_2b+Jb#_TU2i)4_o#O34z<`N>^3w*;R5yg#o4~Uedq6=3H_!hw-A)7=a2<^b^?cWiic|iiG_K!} z`;$Cd>|6$Z%?=2?BZP}xGRj3-8Hkkf&T8uc;|Px42PQYXOPg??nVE@@W^9-*b63;K zC>K?9ZG3^oKDt9BJFuMl&9jk)2{yhC-GCZzgFJ-$(|V7kEln5#%dh!q=pysg55Q+j zzaxmfzoCkj%mkH??OO3+O5Ia;VB>@|f)RwmqYyLmM8^hz&ZPo$u0U^f)?o8?GQ)9( zPduv|pzCjf#J2Lr8z85O>;^)aX9UBVXR^TP=%8@lzx{zG6(fhqW5 zYq64&^Ns6T&5R~Tft6V|pp|*K`@nyzWMchS;DFSkl(kvpLnF|4NbT2=sLN_a{1p)u zIW4+6?%ksd5A!I#!Mt!zPrg|e$Q4X(7!DXVlr>P(aAM#X&@D_ui^qN2mmmk!jgmjj zfNQCP`Hk4YcZ;5r>T;$0o>6-CpOE2AqJSD@peVfnLYQ9$2*fZJNyRJ`4xRu5O zP4io#zF+44f21N%(^7Z#jx(~tv&$veN~J2(IOAAx>0DiXVS z(?eJJ75ElDt(B!Q&05Dc7U&=*ZCY>e6ZR(4z5+%F^w+PbRkWQTqpR;5dEBau^aw&3 zbN)pktT6UlhS}~LGY5j@;7Y$mO3+W?5_<;+;_6f@3^UJwWXf$?$6lDRwB(2nU_&Rr zM9`m?&jOB;w^)`=3V61*nQx~w^8LF2{uJcMM|CHldt3R}<%5Ws&2Ka6O(^6GRL@F7 z0SHuoXB-m|@pUEgIj+Y%Y|di}2cJS+~*O+-3wr z9p{{pT)gp_?w>z>5*?ge|5Q5-xB~A-h^aBV{Vz`e<_)~A;Gx5cAne(keyi7ERPEIC zt=(9k#J&5F>j68Pf5cC?$U32f@;5#WuGdh7PrK~i6e|e^0@0||{ZNpjAr*E&5_J?wTstY0VXeB^%(3L(M9&NXU|%0yE0>AEVk0pXm2o(ZB1>qQ-j)+k(0NP-0;7+qk2c#5%A{d6`aQJ%=ZJ9>Z0kI#D zW-!!=>B>RqU0@$oF!YgPxp@U;VT>Gwk=EtJihH$}0xzpz{>85xcocq+B!t_F0$Bvh z%A5*K0XLHj9UGnmq$$`}RlvI1za#jXS_b4R`Th7H4EI7wge>U%H!*?NtG`U2^2%bh zFuKOZ9{|17D5G!%awXI*<0T4=S4#kQYXvk$FDvrp+SZJ(h95dI_7uWc(Mjmu&kP&j zx|j0cJrFf5u1UsAC_o{okvyFLzT*{F=3n#kjsu%EjDuHi=o)KWU>9}ME`p+6Jrix6 z{N}M(MQD|MoS0SP&RgC8B#(C!C%qm_4KidmnGEsY{=BBDSE6lFN(%Jey$FUw^!p)P zI9v?$*z_`*2ynLlgfnty;vls7b9aC=GdU?~2CD(&n8T|3Tl%76Vy7&K*bg5#=|H?% z5>NO#n!W#xKngVl^!A01<#cOG`UFm?GVK)nPgQhs6a3WY!$IxB6o&m@p}V@%J1&wp z4zS+}^)|D`Id^u~EzR_nRp3PlV!*3MbffXTy^~noFhd^4xO0R&bL^e7FKoj9t1T|S zn4(S%Nsbl3`vHY4{ww2zrm%f=S-#wkD#ki7ose_O`lTQt(F42)fH;eO z_U8$pPCS)yAbnD4f7)-859stJ_V+LVbVSn4@GUB^9hB)XVZ8aml`(!*PAvJ@dP#ySheE|h)AScXB8Wvp4V^pkjQi6> zgPEXz1L zug$(zHV*XmW8fikCdNx<=*6OsV0iYmN=|<4(YMQbtK=F63@mFSrJ25DC^u!zrlGSL zAy62nd-7n}*GEdh)ytY~JfjuS+p^TrOaX?Vm{_$$`j>pQD4E*2vvURlK-=FUczW6w zX#oT*a07`9tOEyl2V?KeeK$*C>-2>-PkSg#WjlL5$qA=shY~rdAVsEBs!$ z8t@LRoXvCklQv?lk&EHGMh(RS9PIYd55U_;{^p$J1r~{W&HUEijQ9&VuFA8PQr5q@ zKYz(mH16D)m;OzCUHa{)VlTel8C(=u{m_Q3vz2OI+wK0a!yC3A&^PKB)R8UP@SD&6$J(cXVZcVi zJU9GnGv)na|3${ZjmTNd?^=DIH9mh>><*z?`)z*{(H{__Kg)~j^%VZxvih=r9o6>j 
zz4Uv3uhN|a<4D{1C2Ri++5R67A;EnIP8<-AGD-?vr2zVd+0JDJ0bivUnd6P>G0%3l4IA)_sStj=rV=vqPi2VQyEMaRI%$bUHuwy#GSX&C z-O+ct2%RjvYdMWaylXp%c#`&?q^peIZ34boOHY9rK7Gzo`^r8mTcd2p<#hrvH7mzP zfa&sqak1dol0SUG8^B2ee*RqKKj##?{?er@0oI@uLH>2kJ-^>q{r^z z8{rBo&yD=&3vv3wrH6CBCNK=2b#4ttt6GSLwfTcXD*|t7eOCYfkA2lS zWl*Nne_!qn`SbU{%$Jps(Zga1EnmLqczG4jTN@gTp%lml8=nS%3cTo9=1=G>LiHvM z9+~S=T$kN@$;W}Kt_gw%Fx#<4=ecCAUB5v`T|F3RknJE*wh+@!bJ>6Q)3g1qV9b%n zG&40_2Y7Oae|D;CV=6*EtRC}j

|!1dmtqv)9CIY!7jkbvqSY{~~cVqp|%$c!9-cqdMr z=zM@|gIFkQ!$`o39-S2v$Ad_Xtq%F%H4m9Pu9P@ZBZX87JRhjhJJOl@@n0=XOyUT7 zq$<#ySt4-Ew{Le^3v@5q@L2G;hY9M(PnHPXoA48RFRIT#xX$rH%b!@J4bZ%O59lRZ zrg6%1VmK$DV8@m}=5a-o+;Snb|t8~>?ks3-i3zS|$>3YtJFPvA=tX>WH}SR{{7BA%6I?0vd$Pb?D?oBg8j&2C)i)}}*xk(aNV zI{}Ie=XTEh_M6-FMm#6OChx<2?LIRu4K-R((VCRBhLq_I)@L$jhj!0M+68eCszOR6 zX(bJ*dy>8M(zLa#(d0+Z#LC`jkQcm}b>tBaLSe5*28eZUAP>x%V(kO@^NCM)+SMM& zf3k_!L!W%n{50+fimMk|r}hpKrpvv1ADbCLI`uR06y621GiT%0J%izfHj1|*?z=er z5ci*)tqR-kZ_>qncoD%%*i5(B(qdS(Ao23;c~1QMCuOnsKFmB60mDB8+Ch*^F#EGW zFdW2LufH4?%bY+W@m`aFeZFJyLj#V_RpBWslE4p_C`sr$CDj6s9-}BR4S$`u{Ii#q zudnyMcvD+@V8IUaBNZO#yc_GMja|nXUgw&hHUU*Zxl=38WWWN`Mv_OA#4Gs(R3`Nl zq6{)KebEx_<4^r-NoD3mMMc@YN{)Lf-`>B*sdCx6$lyq*qPw}!G;BZ0x2pwn70{O| z&taa_*KbKXL-VcxB_9~}RF!c{rA1weMaH@@kOp9p7OBhzBnMhXTH&N!nI{sQ62T&Y zR631xt%&rjv$M0R;tx_3{pHHE}^lSA)Kx)kWnqF_3WU#G`BPO81W_IY&UXM@VlIY=e zS?eY!g)EMzJk>+#e-Po7v2r0K}IoLa@aUryPOY1T|sPg*4C%&Vmcw}`i4^{ z)mY!&(p!LGW?!>!%P2u02`aKTAma{blqxtlI66%~MGB69dz;#r9@8t;}R_&y!Q$fOw=$A?;{ic%umZ-73U z`*m}rsYo<}-oX-h9?)+T8jWRVXAh5xVy2V_1_qMB)DyvcKnNeGn}}SteIq}o9+1mY zIK{q6!b(MuP3^%ahP#}swHE~?n)gEUTRr$OuJ4UI!FiK!1IUuWd2OgOtd|4-I=;%` zGDxNWu0fg4ry|ox13uK{`M5`WowzMofE~ZJnZF59c$~MwWFInj9E`#gWZKR zPK+1gY9`spEqpx#bUgaDwzfSTAyzEhpW=Z&vieahF!9`JoOzC3Sg&Q7a}B`#ef69G zaC{0g!97Q)DnWbCvV^lN16B~i3SlaRe3rIvG(m3gyrwl)X26WGZm!l6CU?)DKSva7 zFa&ytM*jI+5-34aZ5-6pf-rgtEDg~pokAaO!FN4vDq%*-N{~LjV!hEhIh*EJy~wkG z3kqLY@SxmlS{yGL7cE1)H39F+dA28RhvzvMXQ07!V(iD&V-~yr!7hAV7LJ#VD-A#+ zeW#VN%d?XYX{2wPyN`B|WZcFKS`pJp0a-g2t?sHS6$?4MiHG$d9gbVleslXEVs&2a_K`6!?GqJj(gvSejt z8Sq-EeQ1fcOG{0ynT0ttQXCBq4i-!Q$DDcUQ7>;PF^X6^!4etVm9b{)Le@hvlz+0< zP^ukCOqZv@EamjRPMBx|N;`tgSF^8SP7uP0QUDM!?oFw!4(iq*szsfycxe+9kg}19 zMoM}itq{tEyqmE0_?d?xHz8I&$fbefOfRidaO$m02%k3y$_&Jg9?hhV7t#?&K>@^q z^yA0Bg01+aQZq|aI5EFs+!)p{(e%S`aRDv_Si)1j4Ir?;nj~~`srN~*f`$8t^(t7Q z)8{MXHGRtylvik;`3GEz83?0!ujfeS@+fRt8Bl^#r5{NFEm!sWb)bq`96DWZ zDYosV{{H^1Q?;^mWcs8q`H@><|Ni|B4|89~H#apQqD}DLSnnCa`3Lfbn{hQMowMFk z{)||e?pX=fnq<~-?52q~+DJ=Fr>S%%-4Nn+kCGzn1h62bw8Zrkw06ID0N-r^^hZWz z4uHvEI5YCjiX1+i>sX8+!(ejT5F~^YnxlX*W`&7q>)pNk^Va54UXsH>|hV_dw3_dZX+Ky8#IG>j7~ z%m?Rx5AmBot_Nnrh$F-(%;G4_G-@4du-u+F4&GC9B#YN8h^89|DPuXZ4EM5>l$3Rr z$nxOZ-AFwcO$5!bPG%{x$p4yB`epVOz%;}$dE@mriH@a6r2&)eConH=TUI^B+5j%~ zITvv90OYeD9g?s1V`>VWb9Uy-Z3!^$#XYf8KY2|=M1&4yJW^33vr9Z4zuAC=_9^nF zy>3ooV*-a86t}V)bTFG_(>GCGf`2ls6n0Oc4`Z#BSCdZ~ME!yzt1K_i-IfoGi;DrB zO-&lDJ0ElLL?SOKHp*|{GpITe&2KW_@I_kBkEL9G@nQ!)J)QC$e`1IUh8~EiGxNY% z_X%UrZ*5&Y1mua^4a*1-VTRQ1T}C9BP|D=_+!xn+3=GH68;aIQlcE1$KC472dWoo1z~M-2@($776`E493C zb}#3s;L736`*9h4Q?{tMXi5USpSXue;ho(H9X4*=2XF>1BP;asxFP#vCOO#Y)6L`O zFFx?*;*Z)D?3iA?d;CX(;6dEI_&Lv;Y;WU>?Ter3 z*y=4+FviR;+Ru>BQumxaEDTqy8Zr>RM=#*^E=jpw`kO?deadI)%nk6Wpm*no0uQc)Yp0(LtAoy}>-qhs!mtaFL=n z-^l(Jw_aRQwpvuyE?dp#Zt>&q64?R6k;Tskk38%p;|a-GGu4btlB$JXNnMbnl?6_~ zxKBN=Cd+dxIp^WG;^#uz+WKa6S@Eo}DI-!B9YsC%oJNWWQe3&fZ1+h$d?+cmuS#|E zXdu;|{P@KMN|#2xUuFB>*9O{sw~K6;J#}l_5vg{}$+rsyqjmab50u}U_PsZ>OXkb< zJLrHMfS5!FRv^3p__6sz1>a|N4DP*{8C0sfa=q}=iL6E9!g;lu{&Ndp%JYU0J@9?7&e_r< z9edg?d~l6GwMDB@M-qATQ|&Dt2aSYVe|9_`uB~Y&<~B;|mFElZ@$aLFe&nD;aj1@D zUS?vMg(rBUmEG!z`{joWcdKXFp?4I-Mj1Oc-b&k%PkC3hciw@7ox0%A`t2{?9OZ8o zI<6}36l9w>m$Dxhr{Ls%3}8u*yAu5*h?K#XcU1Q@(06#wWelSkQ6EOcpL|xKcxChI z^wR`2ABFf;d_07g|Ha`J_y8{|yEAX?6ex4Ysz+$uDflYe<`gdR!xHou0YPrecE&GL)32%e-P>u- z#X7BdGlJHWfyv~`%{>cm*5}J=T}ysFM!v5yH<~5=gh52pQSEO8nS>{E-&F^M58>GN z_+7j;^DguGv88L%r|CIA(E5|T&f%}=-HT^;k1i`F=-ldcrIT>Ea~*q!*yY;&9KX68_es^n9COEv@i2-{;M#-4me%&!SAj z!M)5vON%kz<+a*c-*|1x~=XiVh3`VAA;YleQz_h-C}xfCqjg%mJ4_RNUa 
zHczHFu>QP%SEpds15Bs&4b+)=Q_3#Y8MimiWMmgO*YEA1zuz)4#@2b~$=v;nmzpi^ z6m4Tww_SSm#5)Q&hx+$s1@F@r@1Bp|_q6U^VM>*#43={CB!d)lqrfInjJ6?dZK=Ms zqaQ(kHh43+U}F&Fd1#$8ws<1c_6CEWo@t#N#?!wsSn=lNA;a;Y<&CH;x1_lM@0a+I z;OKjWTlmk``VWl??4lmq-~8$W)CW(zZZl{Wtec+IrnP%xekPz8~S3G*MIl& zBE*6>>(!!Aab5TD;&fiQj_PfOv8WHDN?-2?9RGQt)#YBd ziHMHr59L6$mVU*HA(WE7I)|_x__e7*#b&L5DN4FyoSa%Hzh0xf5o^W3iexQ~6WHWJ#Grz%#QBH)72++s-bd|U3hAH`$nb)X)rO5e#i2YuA?*E^djbUm`pY-x~Ocikg6gkE_u31qhNXRc#ULmU0h*m zX%#9h`@Zae&gA`mR7|&r_RW2%?!UfAMC@!M<2I)b_&U-sJ?`VRK1$3EuUOtq>X~1r z?NAxu1uWLKORo1^|6zZIz%tv|yy6299oLZa;jf!p7r2y4oIJN=7f@wAu}>6G=ya_~ zjn>=JTJ6(UMD6=Dv@cgzjgHT5Os0}VOE~(DGcwm-QO8xTRgPjTj&v1#R^f2dDKZh! zo%1$JRkh%1Ree|g?C2OSIkxjmgAAqKGe&G1UKKZ95^6v~p<`x99%pQ)N;3l3M+Ur# zI)+uoqa_Su(U`>_vQ#@7KW1FiZtbvMR~c0*MN=fvmGtl%=bS>aZ(4U!M~A3e_ga@! zh^JWaCqO9_8Oe`NOJgWC2Fx-s-f1}=*8~>^RBX`pl4z$?v+3dbIUhd`Bt;uuG>{(g zmHknxsU7BNKS;@SmJUCDwNJ>|gNp>`IevMFQOsCxy(6ya-937)xxe|h*%?O9+!ll8 zgsl8FnY3s#HmXefx07i&vk*?>hFYHl$7ja;elbQO6Z z!k!HLe4LiensbW{oVCMy2`XCm=PJ&MF2u|J;CSQ6P*i)C$WGRa=S$!T@B11mkyW0X z2M9yg`8uu39#biEYG#4Xo?F~_i2JZQm~WGwjP_+cCA!Wfij?9cxB zNOHkgIo)2(BJp@W;g&xtTd`6kaj!#Ko&n{ItYYSq8NM^^!Agr)6h?mtb$r=uM>bq9 z(lI-;d4!PUGmxi55#LJCKizETHHh*YoHNR>94~0+aKmQwhDs8>%ebO#x4uO&JRK!I zzgwr(MhbDd|JFU^%&C+O1O5H&EW`&6)7suhvX3(wSLTCewP+cw@d>osJD6)y_YpY_#cZ8ywm!FbQjwxDjg8rySh zLGM1buv0*~-&8rF#YNtKtp0%+DfwhRB1})k~IIUjbzc9ff6DW&~Y}(6x z+j)4*q2rzfur9}ejzVU5faY`}{%d<0-rV~st)Cq_>G9}dpKHss^DT4A&!WXG45ir? zhGOksT_=ugWW!g=9}Sc_BU|QlTiVklKZ+BhW0!{&6UA{`=Dwx6rQUZ4As!NV7B*ZV zjNwaU@Esj9z$Mdk%;0j~j8G?FL{pw7)2Ee*k`7@C!a3sCRrLl=offXs@{M}Syu9Ac z?xCCA9kBtR5?AQ#8teGx?|04a#D2Do$?14U?yaNknI*V5L+M}AixV>&ZF?1N^@Xi#HhdYu4efUJT`#%+0LJ@^)6qZ*{NE{A{gTKp4;9MjJzI z^Yy%$dUk17RA&M@@-x48@gu+9Ljb@w;8X0uU}^+0%&C?t>Rjv-DEP%?1oK*Clm}{o zanW)t38s=TqIgpPjoe1!UDa^4$iu zBRdL}Kj14_yJcp^j_=j0ocZ;_WE*w+AE6p24I&1<4~U+7Ikx%D;-vIr=|v(2VUtp@tATaPb};LKjiroYBe$!ShqM8AWHvH0%w1#25?hH4*; z9PJsNiFx&+*KnNBn0wstQ_HyO)SGQsdF?Oj4+={*E-TVnX*rYAwL=C$O0*g#|AR9w zOY#iH6?#NbIo0d2l6P!NBY#1wWt;TdfN5}!T9;Pw7nhJ?Cotvrv@Je|-up>N2<7Y7 zug@wtZ@|QL4D5yISt_gbgCe*&lnhnOf$Jh`w$ zEj7`u+Nttv`mg-e$~|WRGfj6Ewg3QKJmNhm{#j|L z^>LP0T4W--C-M5z3aJ#2MBV5a;qIWdvy9Y@R1~I^P%}~(KF&setTApdekLZ}@e^E0 z7tC6U%zC15c}KSMgD7{ggVc~HiPKlJuZKi=ypFls&oy$N={?l(vz1|&WVCX&dYIO| z!1>xrmKOUoG*{tqaO%1x{$)%2W?f7kjXMe?-J%H zLEun8-Ks%~;x~`zPO2rTHCq0Nqg(5#@cGp2I;X{sch;u&LnsyKC_)KF?#%GJRFvLr zCXMq=kwiDjGa|F`x$D9+LzzN1vH+86J7B0!^a_c(YG)ggW+$FmA;2{7A{bK-r5A%) ziX%FWC!=1lvC<<^1nS)5jHp*H=XbaIdssU9yXmu$9q4;0D97%nJE7~65*TE2?%H)B zLdhPlHDZx-PB~CrJ?rJIi`bqydm;jkOb5nG6nbi_j(jAJnKXtfB2X}9_NVm zo29*abw_%@s8`a_p|SCJnt&x1=daJ}ULARt>{WJv@~x@oc#?YMn-q!e7q`=Zg|fCAcPCK&e2^{|HzqQ8bFyS9H&43|5kW!BPs?xfN+ zh;y5xh3(?uw!9dkP_5>gIM$#yrx&N4-e|Z!#98S3hw_iYdKWJec;9K~7U!8wX=pbJ z9eAC3leKbn1#_9AlGSdO@7}FS!7%C)clL z4qAwn2X9_{@<0rQ{m&J^g<`MUpNK+faw=T>dk zb}R14G@SnuS(>sU+G!J*_{PvzkXexVwn?@XJi`oREpZ{JBV*8qd*x50e9k5+HPVv; zvv<04o5gpH9{G|Ydghc!nZJ49w1&&Oxx7#6>KXH|j!={&GOe=+eT%s4xOQtp9lOXj zwzz|wZmFBp#0Ltz`|>>EOpOA=2n?(8&DuKs-_PI~Xk(rzvhL3S2~>8{jI_4n^!5Py zmGjaJ&x`Rl`I-J}3wm#;)o0pyDR;ykZ_=-rg&TaFsYKCh>udwLZ%CC>q*;gUuT$lh-t1@W2Q0Ku7e&<(!*+bV0RTfD9 zFdGK4k>@bri;2=|SrFkL-bI+)K71hN^=oUdg=t6h2QCWr)W46A2GTi;FhL{}9X@p! 
z6KVh@7^auNd}0M2!XTZ1?8eK=f_7)~UZzxaaidlB6oFy7K?PpZm7mc}2tLeo&$ z1cwyoBaG}&$;#%k{cfRY(1N3>uI)fQ!_Ex1AY)Hy~Jc^KoxOu;Rj|l5nva03j zX7DG^E0$oLh=VO6IqE9-_Cghzqq{b%$+pLbDB9F3E~a8a&iwcrA?9a;#E)A23$hPg zEBGvPu8|Ghd;S0Y4XUfFNBuVL&49cw#EiroCP{vn8ZUx**r|A8@GXbJk*w;@ktV(S z?8t{Mr8FK$J$~TCQl!3po6lB!6fs<*>5wi2{DDCqPfi75oc!X&i%vOH!GqB6Qh+-S z37XC<>(3qqN&5iI;2};*;Dro}K$?3G46&%}r2Eq|FAl5P0>LXyCDlwiT*ukP`UL9~ zCnT~QbPz7o7ii{&(ta#1P1Vy?r3&tVv9lARbK0fvj<&tiv=&!~trBS?UDM<3Yf-m6NIHFD$& zhL$>~>~zrYC@}Ow_(ITKQj}oC2!j0L^*6rcPtS%r=P8#Qv3uzTwxCBpsL1G9XI&jM1HXEqz z&<>`Awl*DYLB;6b`kSp1dBTrwgA-Kdx{)$qpgz*}!WnaRWs!8S))l)%19JVCL0&SWi_PNH4YC)A2&N zV@>}>h2#v+B>9KdxW1X%E7#Dy?gp!e4LrQ1pHr+bEQDPrV^+b53?FU5wUlgJvd$9( zIh`M24tiH|H;Y)1e=c>`_U+z;p|PLpFdTxg&=pAA)CJ?$5zyyRfYG*Sr`C&P3m}r; zcDKSQ!6*n$6Huy=GeI7;bBL37YfDReiWa`}l5VK5S{9fzKM4$U#IPenQ+0*WQvxug zD}YfP44jc)d6)=yJ7721f9qbk5|H@*ecogVFfHto596NDeyH$^q)vY1zsO?6Ok6q@ zMHGz&`);^YyJ;RJdZ({x8Py&k?2C?y!l;IeJDyZYhcs4#QtGokz0D##$onfwZUdHn z?#&S;;NRc^5g-0Au>AuQM+1MIu6J+2Q&~2A9wC1sGDN$#|2CQrh2#Y!f9STle0dNr zO|BOa*tmWT6zPB%&aM-x5Qs@evWdX$bjNQqUc7aMMQ!IE@7nYW9Az+U#|7Y$)0+5C zrt-xv!q^K!RJ1$uN+DSzWb}#A(z`8?et`>PT?Zfo>0X~DM_EZ7y3OR| zWSai;^iVSznc#!NWDXNKr8OyAH&zY=Xis5qazJH`lw*QQgCdQg<**Z3|Bl_(%T|BP z7Od+LI#cTasG@Z5-p%FNxibg%=m9!OpJfug1|c!Y0kJo&U{;Uv5(!7LKuW{X(hJD< zf31Ammzcof@5y5v8?u(AgmyA?*hXO7K(l?D!JYL z|L-KfDLD0MPtP^96O@$xsk(_;$L$z9xVc}is!X=VB22`$@cnG1(K>+A+|?iV@#Dv~V!!pEeEF|ad44f8 zwU{57e;#grv2crTmgUnkes4elv4767aRmAc0ZpYL(WYXk)cKFY$65EG@&Xi9e%hgy z%P^_WL^S2dINo`njs)d5OtjrjUK87-1i8nls}J}lRj{)MKT*$Ke_mUJe&F=&DzI9j zH<#+!>zv9aUYLZ{(7vUmw^fKGRS))mKlZ7&V`(6xbOIz=V-*{H#+=_~BfQE32M^BG z?;#|qOv95Y$R2EcRu4*bE(JGs8XK>iEuv zb@Zn%!O?5=Oi#NYToP#4iy0v!ps#lw>$L95!#S6*$Y=Yur=8y88f4dm<>uwJL;_Pn ztq>>=hy}f(#jxX&tr`z_f|RJMA3V0cuYSGUOQoF`PHuj*H-MX)n??OX6ly@x=z)^c zAE7{HAz`qRhvtS`su9_o>eZ{8qFJPwm5RUgdO=yg2PWmW-(Sx}jKXpLw5N?ge|BR+ znB5txhJ949pT~j>aW!LL;%tPhepLQ&n{3fzD^AC7F+oFpeX7ryjBu?cITN*4Ed`!# zvTx?C0($Fp+o#QKciRIbDBpi#FQN`NI@ zRZWeA&{l^XP^fSg6X!4F67M_XZ@?!hE>4WH%loYL2w5SJ92{g@2fJnTWw{*2MB{~j zCn&Xxm0eCdVITk|7MgtOqbE;gK*;5FMA(b?kzm`i%FRo`*P!DvFZ#~vk5?SZJg z*?_JHHP_LjM`wfV@0AtK5bSgL;-v$_cPt?}4>!(CKEy;j^+-GzR{JiEX2K3MTL5^h`e14)8PKLWdD=m}+I z<&9}rWQlM5ri+L_5wG=GywP&i-g^`0Sy`=6ui?(?oCoQzz1`fZ#eK}>c-|ttsM?=x zl=tU6GF*@7-Le8Ey(YF)MK5|V+kqc3p;xra;qZ=N_ikhccZeCk;}NI7atebI5Vtu1lvyhWt^J^ zxit{M)G5~sT7O+Vy-0fc@iIq8M^N#}xfNvfmJE8H0YyGiS<3RZKt#p*R$>v1e}?A@ zpepd3ZMQ@W&?)+Mx#e1agFpi+KFMr-`+}C*Q!E+TCZovB!^rmtk#0g3RLb&iet%&0 zDbOe06x&>0S-CN7^1j8L1g3oIVRzhh4ABRQjx+_5Xq8Be%L~|U{z}98z$Fb!GlFyn zkg2X%AVycLqz2IY>jPI$brOk;ikeaF9}T{@4>`|mw5>i?cGRXrpE=UNIjT3K1ndf0 zX9%~<<>chUMN3LPXCKlLqygkkPFNOAvnfqQqGrU~scz%NBtgVx=}x+1i47uwX}joJOc~sg4c~I?88r__J|@5shz2}m)1obvXJY!Z`>AgIUF7Tg5ALD^CCAqskTE1DLF~RZEu- zIN3xmoDU+c7y)N-C$$IhBu0kf@teu|Zid-@By$cd^5GL3rS1j9{rzcEL_cgsN-&PP zTTYSJ@|HGwS(+B!iruTn^Hll?@o@rya1!`-kq)|MkAHzzNgJg0;1&_etI|Sa$Ou$H z**~WS=t$h|B6zp=1zRIWaJCKcBqky8*J)1)B3F`)z!BKGR?-WI;B+M{GnW|TRQ5)K;hfa{D?N~`0>v+bw#D1O-n3S<0q}K_n74x3J-_x zMOI1y{#2*#y|7Zg>)Rn-sCorB^TS(GNq*hHc;tN=#xwQfN9Uz#`+}Kpv3HPb5t!zb z${qm*f_-^tv7VG4NSSxmW4f*TQi)+dsAp9rV~4i`_(k}b(6_C%ld1iw5xR3@U= zVKX`G4biU~oja_Pa%UdlDu0&@>pJ!~2yS^~cx}ExbLQ+^r!#i2Ar7ICQlTV8QwES_ ztPT4z>pQHdYv)LMRYzk%qwg|P9 zs<{be>2` zMjn-0B3f*}0EI7p3xMSQhHUz^>RstZMHhFeNsGII@ZK)SR4bBx`%?UA7mA-shn~u{ zP_tyNIfy48tLg$L8@H2Q)}kXqOFbLyf=mwoickDs64YEm1B(8|Py2u;m8#(pewyl- zsdd4I4dkWx+OS?Qi^zNE^Z9T@8xkWU+orodkxM4~H}5>9_8JNu3$_9(vTU<&SEfZM zV=ou(tl-?Q3P7_}=_!Y39!xQ4y#n7aGWs?bb}cJbJtHFI|p;VZ{Q|at^nr z7E4VY8XOf+g90hp@nfh}CUHk`lgb)Zv9*4IZaRAGimmMtsFWk3ob`!err=3T3RTQh 
zj;-ak5`3DcW)CAXL$4Vv#2pEXcp{LGv3+OdpllOj7j9SwQfA|I624GV57ls6wP4l} zYB69)6?j9PH2r3Iz5a^h$7~~OY;O;-9Z4hT0$(6k_=;{^jYZnGw8wF`U7?PXon4x~ z*)Wp2gP1o$TF)Li4%RtNIBK?D);LBx@LA7uKBq6(c|VK#xvh}fO1ewxmwd{qrqN+r zZTJa}BoNpB_dF*YktUj5na?)%_P4w-_Toc~8~B^Dy19-(Be9I$-%`lYwB&`9*HLBZ zKe_)?$@woi=qZm$d`o+Dh@)A|YB#s&`1 z=|2U_f8?Ko+ayE?97+HD_8Ni&W~XHBKiWyirbWIO8?C&A2h6&cejn$A;BOu7-`}}W z{x_h(4pjxLM;bAG3N?_0Y678L`4RawYyrqjE(Wm$@p|i{wN(WHdDDiZNB^cs$lz%n))B5sV9cB<`ALU0izgm z?#b^9aHZsAXQa~V*~ea-mvs;Lqgh_ls@CzuxVLqQuZg`3dAR<0pN&=^3pbj=%wbnA z>%5LxPm}#0`ReK|_?I9zmNc2y+wePR&G*KCN@jhxHt6y8Lv8jy-S2$_x7&*3p!U_2 z%hC5o-2;SvtJFJg8s4H zmmyfQb*l*6pEvpEIrFWk`1=^vPhs%n>hDkdQPy{`tBU#a{yE~>&dzNCwdwm~R*ymP z%IZU3EV6nnd%!_t^_WHYPeQ6%`hQ+xc*<=N>2UM#?{LGVnxy7y+^g0IzccXQ`Z|40 znXvvJukWUyUio+;A6OtDSF0niV&K5N|9XoiCUv9Bl74RgzkkPz>-pD5C<1t5XKPFR z@x!Ur27_7esr1gPgo$Gu}(lS43QImnVt3ge=yKQm%X!8uX+=0zL z^69IlaUVWp0A^YWYC0_9jZh%esED3}0K`@Ej^gnzmNL;%LI8QTZ1MQ{qaUy(6Hs2j z>#l)*sdeZTE$}7`X0B6fv^$Xc6wS{upujRNstH(`h$;pw6QuNA0ymn{7IIII= zN(~O{U_m!xcV-eiSO)&2$~}w%9IArdr+2ea2I4XV&8RV=YKnne-83&G_Y+B91&xQ3IogeWuot5o3|C~0p zrSkn`jrYz*_l(yW9kC#D1*$z3LQf^!G=N+j!8w??7IYY;Pu_9Z-*`RXS77p!QHnKH#+^SC%|agx%wAhOGCM3^}Axltxn z8$mETH2F}2Es-TvwOh7qL1}8)(e-bp>`|bTIMOUX%IS&uKN)O zy=NPgtKTF;{`z-h-ZGt2s$B=Hl6#--)C%ZBJfnK2A;B^mu_8yM>hz*`_~o4vSTVGa z)uS6BiC(9?*9tmH#CfY7!8wG@P_gRJQbY6?eWumO9{gC(4(2Lfj1nzFVBZ)8w>-Xh zE#Ms5gh80c^gkXn#Ywk8+`Ks(>0;1pXM9!$d<>qo9P2Pz`4J&w&p6ZSnIk={mO24$ zO*a>-vxZN7Pdz2)c+DRWW0xCvz4C73*5}ctM*qGlzQXu*DTMfxWf}iBX7%O3td=0a={Rxm#J;I&dGx z+_M9X58{`neGD7)!!Tn_idH}**YlP`dL2n-7x?C=*3H6;5Ym7Jq*d}dK$-^~Y8zX> zA4?T=n7DRJuT`_v<7REtn@wFmk$xA#)Gl9|cb>FBKEvv>RsfnuCck5L^sXz6&f5{4 zFQVI@lf`0+9>)-FwaDqnqh>L$e-XQuUfsw7{g)>+R9y-Bk%1oI6|H<`V@n)pVUR|& zc5DW^YuXH*7PAD@I6DL&o}={4AW$LV$QD+?T5bF$lB*` zH?`QbHBVs~&N^#hE^*z!z`%IC#2RHSQJ-)w)$wEGBNem9^z7meI5+=iyx@uGlbW&X zk|EUK61rJ;e45D%4-l-a_K<6a2n#^iwzuiz34?jWtBREyiJX>CfSVUabLmT7u?_zf zdcFej{v?~R0}oV`7OON~5I zT8fL3_kh^DMJ4ua64Zo^kzrxDFYk*AfA!Y7{yM0SXg6b=hc+=!_VhJ3uJ_)q`^x^P zQ9%v#lVOfcR|O`i0?13sLG<9*wQ&RabVOv{75$93D!`6wULFx>Ukt@l2#tEhlrGn( zYG1^X6GF=CGedAZ-qq&&g6;zChjI*Xbe-XNmu+HI;8gsAaamGZ&Vj`h(o zA+!yO1JTYS5a;W9(FU4l)M87zcfZFjJf3tRZCzU4O|ic)J`OGWB|aytAv7#~sIp!^!E;s0uEK|phSFldv~1?CD7vO3cwWog*0D_pv|-gO@b{srf0W>><(WcR&!?MvplC z+UvYC_mpynPe%0OqQK?E1cVQ2m+^WDC)vBFmpFw~puRIBQVT)p12`Gj5W;Vlj{oPygJ5j@9b=YL)msYk-?*2JC#`KYuB z4T4jLEH*UD^&+XY(ya!4@4ukV(Pq|f;DltAAq?_T-`qD%Femr`$+jE08r@AA59whY zPvu?vS4=0kPhCxVqn1Z*{Eq$L2;GjDbW_E#C>JL2NLIMY4mLBjU@b#K?@xffv)<65 z1-uKneC97w+lIwY4O~Y*OoQB)$+Lgtvmb8z}hNV14$2$0o>raF{kwhz2M15osb;P90;~U%b*5$q z37K(?k5!qrqX8%ASp}t!RTUQqZ%!zdZJ4=1e)(;%_>Hh9M+&wjPeZ>j?@-1@?B(5d z)cY|4wyiWRvaAojrL{%JR3&eQ-yz!k`)oF%uewDL6a6$*haR$vC%>=y%6&W4v%8T- zsywH9U7c%>Ir+|PGug5-pqS?OLgOT%wYV=;|Lyi&JYGFQVF3Xi`jk82ql`k@i~L3D zJDq$C^MK38rdC7_aJuEcB5ESyA&p8-Qcl|IU12Zon2XWc8SPU9CuUvot--5c4X>>~ zu&j_TU5~Qwuwc7<_$Ie6XU6&P(AO4@nb^(i{TaFVe7a7SYy3=2UDna~M;-&R z%P}9)l1W?<1o8~sRZsVFBAqv3T-t7MjX+1bwlUX*mY-z~*fy9OGs}*x3aTDxY#W0_ zS7aP}Ez8Nt37oZI*y0!=L7$)O!+!jGI}K(gPas;iv#M8@nEGj#gp$idMI6|>>-tE2 zdgJMlMjeUd!?f#8((uafcwZb$g%G|l?JsP>D{^c@y$^0<2a9(y8zo9kG?_|tVa_^n z=hFkTBZoJLwds$pBS*VkNsBopUo9vg$YX-epCHhB*aq(xNRzz={Csv#eBQ97o=klD zv3QqW^B`Mo*4MLSn-UE9ZtWw(j4r$W!=40(R$;uj)a-qS7l{ex`ggun{!+8CwarL* zyhA^GehWRvDcG&f!fmYZtUiG#*KKObEe?T4i*u<%As_P+?RC8w{Nt`qW+`5V^Nc*# zLLQ#)x%;9UNfDQ?cuo7=^ztH%e%80FR=QV$s`D&YdHjPg-tgJ*llnnU%K1M_;ibyI z>ql*@xv%Kak}LI1e;I~5Gmh>WV)JhbV-iH|-Fl9=Bs^rahkJdWFWz+_@mZdin_t0v z-6O-@U~;*6uKkTyIT_cwmsgKR^Y)7!1x4IK<3HOB)UX5zeM-Aw%-L>+VMdKgm8h-z zFGZSkeDWGiT`e=I{3*Wzik1JUWSb>vW(H+|8jJMMavhHx;@^7X&vLA5Xz~EMgFwga 
z;Zd%4atSAGZtq6nWML4zysp)=sDVxa=4F9LZdx;fx(N>%-=X#RK(79 zPv_0p&yP(pR<=azN;h2C*C73B0x|XI%^SNz?G_YH&=tnIL$H~1isCM6O4XwZDfvEY zO2?Zs9F%vj+`*Ar46l08!|T01zqoyJG$%QN-s700heneKbF;ECr*zco<8}$W-6_|1 za0W*rn>A~ISCGHaB9_Xq#rR^xp>2spg zq4LSt3scDPKn)Z$O1Ybn6NC5K;4UYkPPwtK<^l4tbyEhUaQj`GKJ0g8e7Pp$WGqD2 z*07K&lCAf^F^6GpruB$2z}rSOY(Qt4Ji!ZBR$QVxmTeg0Dw8JR$i^+F=L&z;a97Z! zd86$_LWKG|9&Ue-s_Ys*M+xwmT<4eU{rvLGF`k+m7wmU+bHG8Ri2`Xh@t3nwCdCGB`k zY*p1GCG#rkX3{}(#xzdV-u5ag?;^tc*R1D=cgJb`*iW9|C*D}@uXLGOLBu4ieCX-X z>MJ(i_u`$~S*dKjl+zMh3X_^#ywcaFIus*etr>SO95*<6U30+d)N)lSQZx4%T0=w1 z%Mwz`Zd2rpriFOl(~m6$IDXYDsL7DXHH|ls%xse$JGjV<%E6Ls9aB)^x@BeU4T?{B zZ3u2vXOyfJc@K;iTkeZk;aGR%57#Aq=XtsiUQ;?a&#b-Vy~52F6++(I*+fXCn<#Q@ zxDpm-$OKAk+l6kPK2vZPe^+&~F1#Tk7a>O@Hq41pyoqv=4&QgY&mr1@sT?6J32(T1 zFX7UAU$9MpPyq&Ug@PLZXQ~f2H#Z-93?YY%`b_;WHSxdx8khi?Ff?;mxmg24Q@o5D zNMNhL+M8|CPM=mLBs9UhY3RTrMalH{^OmTA=L1DcRA8lUh>G}S2 za{j?ydFRfXu`GLPo1&!&a-3mrCz=%6FHcc9Vv5<%%r^X_f24w4216U?`b>L zYI~mAtAWB8o3B+T+pWN|Oa$1OU64mWFEBFFj3b0N^NUN-SW zrQUv?!EyIKD_OYyH2ry&oe;fTM-fHNk_n&I*Z`|QuJ79DS``U`q@1JvnUI4@xl{on zo3SK&gefa4sY7nuei*owSQ%> z_g{tF==tkyh|&s9*Nu%+b0hm;Bc!V*eT@Bwkbt0~)gbQ7?Hs1u6Q^9sZhMI1>a*k z8}RLYlA+Z`Ca513PhRfs5`wC1Z$>{U8LBS2@_uVCT1h?+FHIr8hc489fmyq-mLx~h z8p&cD?c@2x$%o#vTENf8hm>$u7Yt0?5{~ z05AzOT@Gu5%T?D|fZ3CH^UKiUX0*}ZZsJB^O)$mjYubJSJ&P^Dfm!t%jh2U%&tw?d zW2(^WD9?ANr(&_I#Tpe^I*HF?XWcvFa%kK)^F5Txv(=UKAtfq<9Vd+YyRFu%1!Jxm zl~3a=qREaPJtuGK?&&oWXmS6M^mKHa$Z@TQY5fb4inCeso!EvdT3Z6}M(@1(>+N3h zdYjya$ivZ^6_$y8)5K#4rseoE`G&~4&zjI$1gN2W$u`^6@O&hv&fO|?al6GVGb^WL z%ynPmwNx~P5+xCpQT@?=$?Ypg>J!ug{Y<5q!Bm(DQO|K0BK)H+#9_8#efY1CDj*wz z$6EG%y=q?@E1` zzzveCA$4C@x4joK3mrx*YaqonRcETzDk^;yVwW>0(cGDTt%Uyb=1mcWSXRaseK&nF zIZak{+JLjy{vla~#bL{x*6q5r!B4*03YzW~ZCQc&QT^7Qe85dnZgWJ13|AL+?DHy_Jel`r3HOaKN5L>8iH)=)v^-ji051 z9obBlL%3{%r!_}eWBY+jt5afQx~&UUtNs6Yoau<=X>^`+vz%Z3%Tz2sp94X9Z$*@p zg(cTcBJO@wO?UO+plM*}snZ6PeSH5c1mduk@0#e;Y*jsD?J%a`VJ%j5Tmw{~a5DQx zEcb;CZ&nNU1V}^JCcoVo;}PkTOHx^J-H-XT2#ePv0kV$b_UA&i!m*Zz_UY2U17RRz7K6fAp<$qHg5UGv214omLR zF)aGAePX|6gKttsaTBFE%k=rgxcgs&D*e~b&uM!wt{V>?KX|-z$0Z`Fb^g=+jY1UuXG1!pvMD#Q z&DoABkjw9^+85|__E=!>wdxCr=w5Gl54r(WlF7u^eB;I>^(r^LC9-V`ab_q#SNyKY zo2>a;vOO6U#*Ne5d!_kEji;^1Po{^KX6-Gs9BYk|nAL9gf&ts;l%^LnHvhyqjnBdR z8+Af@#(y3%pL!0RDl~3(`=(jOI}PK`yiM*5%@H|@)P5i}Or)6nG%cjN#<4Ss*Tf@B z`}oMeGdPyFsaM0cV4-m4Woh<8+|z{+s%gKi6GZc=MQirOQ}KL)R2 zT1V>!wmwiX@nU7Ex!QTPyH8~;$#o!ulrI%RDVDFGNi#q-FW7Lk^6HpKg&J}q3x$HZ zleu!g+RSO%#akAf)-mRkBa=8DNQx<&*99lnJinN%AMI;-y96ghRHwC^h8H*Nct zrCrETe9!@@Yf`8?{a#brrT9uj77kP?St4dn(3HMfb_1-M zR3u%?v`ZZ*D0m6p3K3Z423Myr5mup=e5I9i^0T}kL-F0xs~_%;WyGdRDXBJbby~W{ zxUQ*-m(<@LT<1fXFEh6Dc%g%q@5}&A7wU=gA7N1yx>WY(J14&5T+D)w8Gem2ybX1Y zE6L)%%uhwDt??;F8GH!WNgw$-gt$vONO{>USR692WlIAlJ3CwE*>*#{OKR&A0`jg0 zaz#A6u59bMnuQG1t}YrkDhb|0(7I0OC*o1pU7Ak@Fbyhggzd$iX*CC?F{I|RWyuDE zi}gxxPZ~r9QdS7L3T<;e>U*4YjlZT6N5QL@B3WM=xSCX-JinpoJoe)>G$dnrptTTm zsm8W+g(jxXGkcj=5NP49b%UW+gbEdAWxMy|+Q&kj*t_Nh-`#7(6+*2v{*r4gD(TcM zTT^jZy8}gshMZZ8@t~;6ban*KHt*h5>+OAxuDybppg=H(qIl%5_NwChI1&^J_YfR4 z!V2~0Vv6(cx7H|`bObMGN8c7HIPqxtgxmO|_SB%myAl3V~X=w`t4!6jxVwc4dOrh_%@YXhO!bQLF%W8DIYoKytVI22^RRG1+p z%qRyWvOd0&k{Me4fZEAUYSek6ZgD)DQJx>AK*?Y2%B)@{>S5cn8YSwwh9Ek=+7LuD zUIw>STWk;INzq7-jy|%a-wCu;pdm%m^wFd2GJ=H8QS9q2{jE{DUG}I{)WhP-i5_px zi&h`t?)VA(9B8z3Wc8~NE#qgQU$Y{PP~os?IN+aL{D}$bxLT`3UBOd)AkV)YN6IsG zVArnKLTl`2th(S{vI{D}{1SJ6KM zIT{H$dcuaiKsLp;m6f7tH?X|z&d6dLhS4MfDkxE!T7XR!! 
zPWN$&|Ld{V);ZWlceZUBzujo{R)|tayZ5Ifr^#rBt;IsN7iok%TW7ZN{--bSgO@Wh zdhx=BkeQi`r4)IpYO2OwZ%P+O3SG&q@e!%_sO-~B(&Ph2sGWqZs?PJB_9B+@J3l^y zcQ4U5e8LTqi?mhb;Nku#*1|@tQek0Ysq$znNg{Ci9nFdEYR&l>97yS*F-Vyn(&@|` z6eHj`YkD2;VWkHfV342lNjlK3zj`31>7ak%r zJ>Ab|HD4~49K$KBsuEpQ;!Wl%gJ=Brmva0|WBEU|dPfU{(XL%au||C^CROHc_%wYL zb_yd(I;Ak%j9eL!TZ*}#nO|8=RCAuneTvkf^GA#{WfxS^d7dH$R1I_6z2ByeMJ*wq zYNZ@iK3gp;Yx8Z#J;bpj(=8b>BsDpU!)zL)P%#fZlnO;NA<5d1qae2}hOQM&5LC;G4y8eexs)1^rB2z)j6(bHPGFl^M zy)C7EfNI>u#83dCN6GGGb4JP~Kfvq40vmH7q|3#v16E~c@+(}oR4MM4Ww&|vEEC4} z;CIAoNp-zk?jC(!e(CCK7&Tv}EIt>iU-Srw(4W>jdTNz}D{RvE(DbFGfie2C+% zu47M2pS5ge|8wY?;1#{Kh;7!+qQ5^M#Ip7t!o5`?)XDZv5HEJ85F3Ng0;vy&mtxdK&AEvT;bT((KS-I3Btu>g0Y*XbmrbO?9!oKz zyqjW;vcahTSrgQf@B`b7H*+n$1TF`gQ(M5~&q@lb%Loucg9?+L$KV_3{KLq%Wt2oC zs?W1SRAB_7%iogqffyicn>k0wt&IcS0F3PJVe?iq*nB3c2m^=*8X&ufRKuQ6G2fC< z^m;%m3$aq>J3Z8M&uOAWKfqx=f#Ny=U#6|8o1Xl@pnZA~q*73-o4|c6imp#z1N+sm zmUemj&!CFKac5&f45z~GL#oV<>-f)v1Hi+6#8iy$<}CJQs%ffXKb!KVXIVE2UN>AyFDBFcxD@7KGl#F%{Ee?e-LgknOQ*{Mm#)(flhwpIu5AY!c)H zasZQ@JCdg-4jj|)wt$(OaDSGDl8Z9-@HcX_k~_R#)Uy!!lv`j5j9T?UA=}c!8DR7+ zM`XICK2u~fRcF_VjND!+zPR?F&=#pcaKjQ<3n+g8!S6G$W9P`zJ``3jL+6%a%yax{ zP8TKNfD%9h%Wz6AX6*N9V=HJ&nQD}~?pHO|ZUTr~DbE#D{2^7RfYS04tT|ySdfN^p zOB+E_oeCt^gRqs@Ln*Y_FB_#p#Z!0o9k3dV3|&TT>;puiODUiALF#x3RWD$Sm>;gY zT0esh4Fw!R3fiGDVn8;ODL4K#ik`SfV}qQgUUHXp>~lccY1oJE-I;r-ko0dZRD2Jb zJQpw+k@Tql`CeHDm2DiN>>yT0`=FLIaYnbvgzyAyD*@&g#n z=qI|a79hO|NKYH`9q zy~b7@9@}%E_A%|g;i^*!YEVK!fr%JY)&F}r9IpU-LSoJ6YwzJmu{Ys)&OaYzpbye~TVP4sm?{^GxwwYM5ud>|+XWjp zEdtR_yTqF7?;}|tB)C*|T?lk}i4A~Z4M2rn2-&Y2M1l{;3))K;N^n4vk3qU#U@ z;7KyraqF%=omYWhr=uGdJeYh}T0|lPzNq+>!3ii!Z&#o|!R6cyhLcQt5WFxh`E66ga&3 za9c(7gBNaN9bTuV%_Wjk>F{DHL0R_S(Tp)+Pln{hK&VcX6*6hre)K)*dAy9weu{nu z88Fwqwgy3!I;M}^f)Cj4=35znNit`iWCg#2#a0tRX_=7|B zgn+nc<-3G{SV?ioVV_`_veht8kqzTwE+|0Wir;XfzeY$&3czZlWNEfejRzbs(rF`5 z_#98qOpCcm__|c1U4KOrdh>M^X!2=TjiH|1>A{C3RmKc$w4C48M0t^&0%K~7S8|eZ zS2fbrHFB8XwrBL{N2}KMFA2BlgP61xCRB0iua?pr{*X)p?_s*@&K`T~4v;zhXyy3f z&$x1}{?6W>?m_407A{ii(afk?n@p5cPYZSzv&nfOzO>#BRF(-<6k!`Pow=Bi)Ho>% za3-l=oFvl~5=Dh9M6eGqrOwppbQmbu2mCNq!QSog(`N=91%05Wl1zcVR&2U`F#SQ*a2q|*fxwMVh5=7h-Hcvt|2)WT(IvCRIDA=0{B{^u;*OLZlu!&Kcy6> zNbxUT1uvfG9Iof&zy6>wH8oX3q<_Qrxftg@7_eWQ$5mZakBf^h(tz>dJ3Z{T&G2vt z1Y09&FFH`V&99HA6AZFzY5)2C7XE2aP zo;c*c4#N^|PGs>c)`#HYBGL(N+47Fa9k$EI^5yBfFaUp#yvC6~MNxweu_)p6edtEy-e^L{Esx(}u|NF}#*+DaM z6;i+6gLcmSXitd+pHV#zp6_W}% zSh#@+NC=oR3n5q_s+unQT)*3~ODof&JSCpTdc5*3K5*RT8s94^Z3NRW3Q#YuU3omV z5du^I(4bzJ1R%msSgNpSYy&o2qofnvwwtl*o3sHPAX?B3-bJUAEwlm9IzS>F#s2iJ zcAT$9pOz^wgX2JMq58p=6Y4~f%gJr3?gPW5$o7v%Ghpr-@>Sfo$?x`~X;2yw=)N~Z zA51l_$}XEH+qwzL0;lup31wJX0UzZ5D1`qZp}_3NC|*lVY1e>XJ%?C*4Q0<^JKyF6 zm^&riSo-y!2dLGsig$ndWVWV3>y!2v0VcqYp3!#*u4G|%O-mnxQ)ForN+;@? 
zUv0ix^d*-2l1+$Jhj0_;2su~Vtj5)zt94humm?}29NB-h^koW8J5#q+1DZhWloEz- zolw>}V+S249$7XfNC2}@=+Zca?)$coqA(1-vHK} z-F+ucOcD=1vvQfJVVkrS*o}VOS?tFSSIiiYPJ`{5JL3Ls{YOQ%2uOhGJutebXL4RL z#h!G;tbEQH#g8Q*gIValwR0TygR=8vFfK10&f6{bGJtRph0@D~R+Q{4&gcJ-jVrDi zSSS4To)q~azjGa@G55^NS@1ON?Jf5)$%XHNTin>p)S$n$1InPIPSPbUfoxOwKqj<9KdA<+tdWH++f56nsAH-o+ zoSve;2L=YxNM?Z004;0Tq&F##S+6RW+=jdn4Zeb9PS{Y3EDcA9>h8Y|n_NJXW}J3V&M13+g5gVJ*WoYEi; zI#RmHwSion-rnAnw})V9r!e#0Id0+EQ`uXxk;~W>rfyU#BbCkfOX+h?kk5{5;0a^2 zzw4??m|RTr*-6K_9C%2_Ap*u1f#60~7DRIY{-4*HV3)usI)I#j;$kchyMg{Q0utmw za@HWm>3;tTSKmL6aLP)6#RE1T{0Te5J>=5ad7}soMzfy0|BPb)3MiIe%2`{CKHV5r z82)|qr{W~e2opw2-P&JK<=^N1XC|lFC_GM16C13G(094#nIXTr?4O|VKVrgs(+Wc7 zPeA??&N0+4W>)4^2j_Es9@C|H%dh>3i2AmJ8dm=+u>ZLcvnvDy8X0W$%3al%Ke42R zXOa8&bdo_LAuEIOSH#0mOVTW@o;O8uGe#`Fr%i0S{Ya9Z+e z;ugVN)D1z?-{;-SDua1>b2kVguV9#A$-5*JB-)louhdfd8M_W<#FqaPB3>yL5&FiPM5d=@JGsb*AII8DG^LLY3?IDEdC zwO?a3XlSQkv)nPJe>bHXBj)LG!-ZZp>XpkZV2@Om1T+Jp)qhRc5?-5-+=)D6M8p&N z2sx}q8Gkaqlb!mvH&~gGfP!IcfX5_b2LWX*g54D69p*yARvV@@FoD9(*UCWBx#zux zGm(&iFAeZDDFYe-Ghf~j27t9HHlv5VXXn8s-2?3I1CT&V@h`$w)T`&~OJK7r@VN-J zCTys=`y>&R9%Ew{h6%ekkYc1z;yo5}>I8oxJV`whS79(;P7%)FI63dH3bT zPIvgR%WGhA_p61&TI8O&N^Q8Yx5;9#YjHQ=f*pk23)`yNaA&IqnmlM?HLcrNmi%I> zXb0TPTJd`>ZqN|FlLg)I9wPx7+9oG8mCimAyU90=-N6wj6lmzzy|^C@^MQ}7%zZ9) z!3hGk#!KTR@eacm9tfQ_<>+cXp^y&qtZ2Jr!oZg&Cx zyszpXjkGSGLzIDu^bEEl{zeqoiO_QSfNe8kH8US(wDK#^a9nyMEY)&E&=fLyVb8M@ zyOHWs*8PTGZ)xf9(#u`!etL{>Cj4lxw1!ITc&Kw)3|O7a^eQO?Z_S`3BUu~Gts=1p zjVK)(w~#SsCo;p@@NK1uoiYF!CaB8`Y>Ndj?7pxv{c#4irFipg_S9+S!JIU8s}5Kz zJ`s6%@fgJ#gM1tXNfS7Yq(?!Ouk}kHou&pR@}7QRBSi!#hus1tN32!-~nglf8h9a0mrP(-OJs5#8D5 zM#^fXp*}}`#VgDDTNr|T4{`TiYZD*G`KK%WMnBDkdW%8>ETD!l1VbyTS;KcTRR61ZEOS#Y9y z52V48Y)h-Vzbch}`BmSI|>W*W!}o?}u&FBh*Qu?pSlRaTKmq#i5atT0ek3A^WLF_S8%;atr- z&!SDj)2Gs1YErt`y}Nt(jD!VpyR~5dTl?~r;4N(tl)e9@Dpv&cYjbB9vP7*rU0iv0 zH9=9rH^x*=g=}S>*^eL)@dCJRVie6}Tz?`X%2F@`xmvFgk*b2GcJ^+|NjXT@#(Xdi zsns0yIe~3l>3v>>2i^kKilYh^TI=?e7MB^ib%-)_L-)8HmZ2uE- zYmkdcDKB;*IN7y&mY$C)>pIbH z3c}|NmK?_s^ae9WpwO+ei~r!j5&dHeExE7#PFkk=w!~CcdT8U%&cO@BHL4B%xk3J;FK~`83a#F~I5LPvox~_gv8t^-Qr<5*mHTZ(Gdl)^ zOd=HU_O<<{?0qqk+>SU3V&YuKrxc^P3UMAgU$-1LgbR24G+ROb)qc#=ZbxV#Wzn^1 ztJ`_v_G_*$3YAA2V!=Z-XYN_fFijXkG3~o4u(~db07_J(V3ou&=1LW*2)0rK@ z;Fzw{JyQG)^|4d}j^nYV3&q_&$d_$K_GguO2x z;n){qr)F>1)^{4uTlN%u4PD)m&-X>e^~ZOb8NN|-tPk?f9_&Alq&~%LU#;iTc6lu5 zxE7Dox`0IIl@p^J?KVWpc3w8LrH7)uqdO%_6Rs5Ouiv5Q&-E9`6^XyRL|x_E;JIEur8V{YR%;n35>M~qNO z=bDbk4BKxVgh$S_#Gb{iJ8ldMjC<5I@LjD3eeuoCaz9Up;JP0sITqsM>bOCE<#>-# zB&na1)XH^?a9qqqR`>~?_wkEUuwm4RZD%zp2%rN&;OB-lPSx`gIE~qvBR2pa~WXlPtpw_wrhEI&( z(XR99Zf@JRVBZ8S>YF0@!m09**sAIi2dAoCi_tvRRQYPAB995=b}g9SMrN7M(85g4Dy#GR>--HbdUPI#WYeTOp?vA1Js~>05ZtEKfduK1 zs?Q6LktC2BJ%iEx-oox`FoiO#ai2L5r*D{=K{p)IQ&Ghi{&f(cVQXN9DDibW8fEG+ zzySW_z}*);dFGxZZvPWaczMF3%BA7`>-y3Zj(S|l3GIY~?s25U{HXey{d$g#qY?cr z=uM+b`Si0zfx@$q5hRkCKxfiI%eI{d4hZoQMqrMdR&=e3EF)vs$jeFvpQ`skg1erI zY$BARRn;O0Cy>wZ(%Q0mzKat6s@mtv^Oj%Dsj{ns*o=cVhlf$jf9HhSgQ+R2Ylw?* z>nsV&&@C?G+(1A8SX0wmFlULdw7!q!HFjR_OH*PO)E#`!ZB=RznZTiYaH-q%yC_U} zbGke?Tsx0PGh(6gNZD?6l}kFI-JWvBUAKeOJ!F8 zet=fnX?ZVdYdbmLY^V}cKL5covLqBCemk)BrGib@=Dzvu@QVQYcFTSB>F_W@C%&hA~ zi}%pVb91X~=2M!I498ez>?*b5&`6n`b5^vVh^pARbXH;^duES%`loSe>|Q?*`ozuz zO9Vl}DihnP00Z))9?M8Vp&lBW)G~N6|E+!9c!5PLv1tc`<0AQY*7Z0tJh$&2(CLFm zt|upuUzcRcX#dc z-L|rrb=PjkS6GdGD0I!Og~@I#Ta1-w#K^za7<(X>(ik#^7rC3u*ij{0b2 z3mG_ookFWkjoadR8H}q>)Z1c?p7-Wx+y>oKFe5{V{FZrWM(4vW`xU+v|7;hBl^Dg4 zO1>`&xxJqQ7h=WPeeByIwQycRaUTTE0I>}q3G>}~S@#)bFsKO5c08+Y2I&eKj@|Y?li$XQJS=&kC z)8wg!K^Qw$j@GD31Mgd!5x#NP@wZO! 
z|GD%=b(cc+c}MrPWz!$t!^?zJO0DJweHxq6pL?6^&80&vg8fUu9Eg}Wq7NWf z28ZT+S)T48??tcd`CljM3?%9}mW_Keb#Ho8Ni=-TlbBKQ>V)2(Q%-9aWvg?J;l9#0>9h9O^_;-TYg*RP=BtUA&~m zkhrLLh_ihtii$tABZ^2tRE+0nXOTRO(%EJydOnzv`+kS$o?qGO*g}Jt)=9`gESG|G z?cSQkD>ncPRq367OnLbAZ0u=g%Nv;?2jbTtH&IV0#LQZ?gHcTbmYzaA&t4x@q{O@6J11J3qXR#d{OS%Y!USSxQHS5g*5TmW$jPQ_D^h8n@b+QX3sT zMIWuZ*uW^ctIcDKVxDR+2AtLJDP*JGy`a8ZbsQ>d5FRS}6GO^w;kGQ;elUwdwG*{` zHSfFx@7lW8C_r=aBJS|aGx#-bTzJz1F&A`1zQ@=yMp5w)NSr8S*Q%XQb&!ga^mC&H($Rw9HTg#ppwbgD3hD*FSMakKnp*7 zoO4Mw6P^~bmJHA3MWMURD&b1?ssq=@5y7#`2IEP)9COINt=>W=C0$|9DWALVL#d2b z5aYsj2B|oaIb9TdSqcVkDUxG3g`OGNqcM*lGs87JRDzQ3B;Pqj%koc(Rr+#ENqdT; zqrWOSE@i}U6Gp+CuU1uh%!QRdW3mq!*jwdd#!I*v0wz!tGxXIW*Mj)jp>;68wCT?r zW#_6fugMb+d35Ke3I=aLOFax?G*BFtbiOdd)>mb|znt2>--p8E}2#Vixt zBx+3N*th**NY34Ey+3VdvfoLB$O=w1X>X2*q#m=_4ZzXRtymkdViEghNPPMNm)Ltv z*wk3oHqk3NSu*hftfD_lD-NftLq0>qcl;Su;oEQ-s%-TtI@KJHs#dYXi2SV+lTGeO~AVp2{M9A`aod~sb(?pz?bq)B-B9zD1c53F~*;F z$RwF}<|C0sB5kW}De6HJ?L^O*3Wd08`^viG4W5d1405Hb8U>MJIF5F*LL_APQoaD~ ze^{EF_+GU`tC(FH#_pIj|_lLB+*g zLP7zk;!22yT@xawM3B;evxW3OG{M;wWg0}?la;GRV<<|QyN>{j+XMH zX%yU-I1T(dMW%i%V*C^C5#$>-w)7UGcp+k33b`Hq;`@t-fsx4F-L<|)RB9jte!SP6 zkU$(S^g<$y5awwXxu!r&C6sc!mYF%&ER82KJfeJJo4VteIcnDn=(s2~t4XX4pnI*x^JdB0!I9M8jdu-YInr83CChojo z5#N-$?^4&0ppmB)X&go^P^1)R|0Z$gK<5ngSUWnOH~!o5Fp^W$W9#E$B2GJYDL&2o zl5SH(2vTCc5KzSGJ$vcYZ2G{$U6Q(2WS({K7Fgy9BeDlacSGim4869$pYd?m+4%C? z)Z{FM$*=JBn$+4<^h#GT$l!=vDpq>2Is#VMgD(2?JfBovMEzwx9DZ!D+wviui3=J> zFLqK9C1x&-PiC(kB(k&9`{C&8i07T3sS5JYOxGQ%7@V9TQB1mP<=&_}7%byVbzVo$ zb^16RE(s~A%~!ueHoJ2X?d`0X#my4y8Dfo*bmbbsDK!U4qcl=Y1xh&Jx1NurnLHng>M)l#676pbGavkRD)ORB3W%heXhE0y2`Ffou_4w6x!w0{RZie(2xFgc| zIFAMA97;TUyGI^wP<<82+4q+;hUsI|I%7UQ2P;a&OC_T_8&EPys#le$d9cS>bI{0^ z1QJYJrJl#7a9~>11d32dvD&{g02K-nzbV2^We14sdjQ{gg-^=08yBoj4m?~9*Emm?8vgU5YWP!< zza5*sXFiDwHNE!T@pJdF4?wo^HP+Emlrra1agmO5XzEuuY(#28uK%NxU?(XXZ)dW6 zR5_t%(zJL*!9PdjngyY8;)#+Oa;?wPaEF?mbGX!;hDy*eG$bBnO0~h}`p^8%Y|7^^PF>xV@7rTP=nAO)?sn1Zuld~{{>y$sl0HFP;f5k|Z5(+I%FGl0cT%8ye1-mTuPE*w)6>0YosuQl% ziIkr1+e7N*sl4*>PBw8igO^~kY%JxDqT8}r6L}myCYQc%K0B2$A0#*MPju4*WjQhZ zQ?JEwk?0{Y>Vy;h(eqwg_`bpC=@8#U=>fiZMHzY;xNe;D=+qKMKH+3Y;99+9ZqJXe z9IMG2A>BGBhC5;lv)`5mghcx3@qV~a+o~%&tdlY&I*Trox#sfbm*Up%iM9e*nC^#RiiSDPih-)Q0i|-QuM!8dZt&Mry zMSjpJ6eD8UnPG&de;78DZ#*~5}usYBU&#VTA5>IY1s>Q znKBdH8J77fJ#a3qX(ZR2q?7)PXqDODV{KLILyMhrXPO(9&dpD9@N7lz6X)iXgDpW^ z(M>N>Zqrw8%Swp>F)=Z{58e7TOd5s8w?Y;tYCMGj^tKh<=|&e!AquwaWpd3#-su4- zgOd~i5;&{7wVMhp+@#baCAFI%e=-dyvOu8JBs$-jm?&RI2sSzZPC)ZWGDNl>R`Ov0 z*jaF4`wn+#Uks1AVyA~}d69|f8UrP^4?t4sh|_xnlc5{M=1*+v3Nk>c<~O+@+?O^0Ulp13^8yHK zR`B2qW-#8)f|9XaWNckl?jtIJN-j2ABt?*=73`P!H#{0j2df4`(xnt}xthg{K+EgX zberc1+Eez~PASlTCgn1g#mXCnV0{E&(FoJNtD0JB2S8|7TCS^-j7!p1=2aW+)CK@e zQ|B#FGUx3RCvx~?oQ4(cqe9bHHN%KZrR2ZR3V0#jSGiu!23xCJ!{4Af9%CuEd+9;R zH%EVpxWLroMO+=sZqv!145CF5~N@=NxsnV)7OPF@XJW7zr^^T)F$jiJ4 zCMT|P-Iw2j0pwF3R8PRqPxsUK!zW!Np-4y{r!bJ?J^=gtq#$eWNrP2oQu@hl{UdhM z*q6q`$?O40`bbI->!qQ02C$L$|9Jj-}nM_ofX|s zBWQdgOWX9v)!~5oVBYOM15e`)R)kJ?^ZS*Hl-obTE<1}X1_2jY!8!B80wdUZ8kR@i zngCM5kURU%ZW?rcN(x3YUNUSc805YY6IVg@!8adsn$vKbP8wWDtGPrfDL`%#h+ z%PrRxbcGr~t%9!X0389vWQxL|uczbj-1hL;`^)0_@t%-~q?$e-tl0_^HkLvQ(xHPu zLTBJj{1knE>Iy+*Y>UraK5UK%zLH(q1Ylo`)~m-Z*NDDw%GmC&kIBf?glN~v`ZT7f zFjrJH@at$=VW}bREzayZJNu5?_r;DDlhZS~FR>7D_bgXDy4qeTHX_g7rV_H~B8Rsf zvbuwe`0ncod@QC*MyzDsMJ!Fod9E3R2+%?0mw@=7!lj*@AhJD-bXcTnn4lAk79<~} zqN@%u(yglfjcOi6{9}mNcTLLZw22X(;yhl7r_#DZT8|chHcx!h`8W`1MM{>efn1*~ z5p;{(IIMco8*h5Mwe*bTd`9f-kzAHN@TzMPnmFtLp7E}KzlRkY-!Zv7jV-DY1sSR_nT13}l3q;U*9AN1n@3d4Z3|EC zx|+ou;;_C0ekWPHNG@vwFS=uSmn*41ypRvT~t)@%?Y2TQdQ9LG@huxVqTInGw@ 
zZ1mN-hBzRNY#hl$y^BB4nvu2Cl>>EByHFD48 zuXau@0B#?=nI6C!WEvQw$X!VwdwTkT&Fz8v)12c5+>4y$^$mcO_gvMuT7nf31NDcR zwrr?HKnjJhj&he2XnBn3X@+7^VsPiG2Ur5oZSx&X5YWo!M=E%1e@W3u|=~Mn6N@AnGQM@J?lid zBN;Vj=GLKx)Vh$B%~a!!n)AlSrA!)b-9brAB1* z-*23Q`X}bQs$4cFYgtfj)|oMu*PTEF+Pv$|WlnmVzelZyTdt6Y<{NQ-F*en@BsWmP zLHUev#*a*>?Y-t>{gz5syO8gQ`%a)Fg+YB@z+0;cmkX|D>(7B^9topv0j)Un^ztM& zB?<>x-AJ<9D3O@d74zIGUTwFnM)jKU!aZzWwYPUZI<}P~f;UfBlNcWiBSIkp^HFwV zX-!Jd9*g!b3y8h;i}AOv`Le?0=>i#?U>^waFA!-*^*2ez7^e!rd;j?(xrx(t#~CJ_ zLuo7oDL-I`0**}6fI5_a7jZF)Ld;77-)N*@u93)Qdh4;K?|i>who zfEG0kKzlOLdlJ;NNZQ-m|LX6GAzun9d*(d@(o|~M@9|czqgM0*H3}14!6p{5d;>ra zw-&_C1pvX>o5YZHaW)sOV#YVLgYE#8REk{02JfwGYj>vVtzhd0vC|&0#uTZ~`hbpH zy4-?oi`EZA?}}Tr+!5F< z3dBaf3-Yevv8M1DNU%3rmX4Lo!SBrYV-KI+HdV+(f&rw96T)Y*EeKe$Lh8!SNQZwRT=>Y*2Em1QF7TliOj7I)c*&( zV8paMeGcUE^X()K+w<)X-pg+th7vv*V+v7q{G4iE#sgEke8FqGWWF@;QQ_4T`2`K7 z+iy1)ma$Zv-yl!^#l9(03{pBF^jglCeiPn*$fke8X-^BG;jQ=o6aKkni!fk(@|z@L zYR5^oe#2;5|C>W9{@`;oU%^l_#BiKe^r2DzKhY_GoSh7$sFW}(!j1!)exq^!l)s4o zT}F~l`d?I*tETvKcYqfRMC#~|+T8xlUw{4eGi>j!e#aZRKNynbB*6ur_P<%`f5GZ3 z+yfHj_=*=jr}6jcpOUzwdbYAyj^|AO#;i*=oA`@_6`%c!eT$r3op>?!xs{ff=l<_% zi$9q5nqK-=akT2t8>v5>pruz-p_bC6-*msk+OKX4j&C6f7*JI-Mf39m@)2Q_!aS`kfKKESoX(^@B z)_*aJ0a;N5^}oJbw7~OR-2J(`yZ8|DH(iEFP|V>ld1j#ZyNeKU*CNarfAg2a^?&ol zR%N{=OKFFw3I8W<_AkzH4HkPWRRii=oKxCk)qg{2y{g*}P(~34X->Z>G=i=hBad&e zd{&nIb(mhdn3W{0Yw2J9fcgJ=>;&8+jF4;T-$anV%L`a$#qC;Iw-o;S#{LkEz1PT> zDGHQzJg}2TY(*C zW{}=H%PY&%pcc`u1Bzcm8}QE4U5?VSNH62Ee8Vo}Rf4S$9Hay&&J&Ugs6nBB( zpWvr+x~#U<$mYQTaGyw(w~W%A(v8@1CM;71787Lf!?{RFU>8OILHUU?5g58?!~nUc z4}v4S7U$k`5J#3Y(n$fYh8^y=fccsTww{t7SivM06cp@arBp?vITXv?Jt^|Tn&My! z5$Zx{U**FU7l9wd1$^BApQ|>217NzBq+~yWK;)uzH3eBd>+VK1Ae80cHe!oA>9`>2X`|^Cf zUe5~|evkQ6A?d(D$9N|@+Xb|ezh5}Q&D~w*nTzfGV475QdrErzSpHt&LV~br$DNJh zYtHL4EtSb#BK4h}Z~;Cb4=BFaeSX){?n9hDK)MGq3VB=7(~0rlo~EA0!!KVN+O_<5 z4ilsvB4hMmdV0F1wc_X>h)VhyU9H}cr-1U!@X+}aElLeOZ;i{u7>p|rh>K98?>nDjq%)ChP zTqdo^zyjN)V0P;aIgikO<;xzo3Wr}Sy#&~Ie?L}F498MtY+1w6j+w4+1A`6L0vk@Q zT~*2^3A4)IoU4m*qv>Zk#>k-aLIVMl?1w-spwWK}bZ&?Ffoi6P#Z)A>OydC3jP#4G z*ZMxBNjhagn47&417J8k?N<1e16=S>RGza9ABSz7#eR44OXs!=fJZLW4N)ynb@>8g zueZ{(fbo~X-Jf4f*PDCq46E7v2VU-Wv3<-AW4n0;^A!t7{9rAvtMx!uJclB6h)a*J zMyqWc3_iACmj(ZGoeQ7qU#;Xi={mIPuA18Hk+hmVaIK4E5pmedw7^RC&n6ErmWL@omGVVg?-c=9ebL-H%pUQ_{f`_r zPWPV=Xzn09@cn&ZbLj?`au}2gl&YAuOaIIeF2guDvveEH7y-KIa zow1bvsPf_Og{#s3NLlgrtN~X(9+)lp6kiLZ0eu& zN?R}kvhwIV{U)969ql2dGnFH1r$RAHhg{-Ro7{g!j0D72Ul3#euo6^|vRAl`Bq6v% zoJog^+ho-&n4!h=C9XXBGT`ybC2ev!m{ShNKi+#AO<(%;1fcb0FT7Q^ICfk~i#u{9 z_y9KG)o4{N*%tV%xcuwosJa;;%XomCS(Qfy*!&L5>X=4AoUKV#0G_J8As8zy#@GNt zW+Vac%(N!#veu~Y@wC`9Nj)I^%Jx5f4^U%N#|Fg=c5%Vdf9z1e2}v0az%5`^5KO{H zHg#1tpMAsvx`?S`J^inxL<<%HD|MV7HWgi! zTN+p;f;d-9_&p5H1O&+*cA2zwK&9+Za1!Fy_zF-c5X61SNFdnJBRQgLce_=N6>2R| zgoOd9>{d5e2~e|NQzmbmEf3NH5hUO{{x{SYtE3CG`1twqq_xghRWLi&UO4G=zM1~% z9V3FuU$vh*YJ)O0$OXI9MZVA9ee_a0zVv0DWU8|k4b)aDOZ;9_;(@{&W+@j`B*f+$ zMgu6gx>{A?xrVtMV)~N&XHy3wfM~1(HtT7dh)Sf$fp2;r5vruOT-jcq4V&q@cwtKN z45W=H!w2pkLU2WtV5h;Y_Jw;lzX0|6cTi~HFApx^nFGjN&o)s&PM)QYsUME89~4dI zrT2tRzI{Lv)IrGqDk*QNz2eY()(*uzae))NCL$gbNz7aYe00uTx)s;T~BkvDU^7&pV{Nvo(bZ5{W zo;x-jc?}=p?*ml64}r*DtdmwZPC#@CP;NNSZ32Sn+K@$yx~mcXOIyv(++9A!?C!~! zHipZf(`S!DQsuwZxzS@bWyzxBt-j*fEN1uJ-nK0^=HpF8vMN~){gt&_XPd{|1{xeX zK6i$iIOlH#Rvjs)FjcyuCULYPqjl_O&QaSQJ_7D8iRs&N11kqN-r#m!*>>c#m&vNzg~v=4o87hh=SyU zn6i)?U()-HH)9zkIXjseay!A=15CVLFFG)mC^+}wUd*p=In%v&ZD-#zhiSui``C20 z@xejD)(#vv=Dyv+Dj)vk0pE%R8jR#Dz)jfP!bN{DqU!spIAG5My|5_;wT0QqeD~e! 
z7I6F2+|CJO*ls#v&cXvN)&ujC<&9_07$ICdYN96dmoXU|s)*dwH{Q$)gAGI-jeXLbU`#fuk?PcOK;$M*t#5*-Va{c`XkMP>4j}^ zMq*m_sRIgyLu)s3x5a>=%7?@<{s^a}0L9AY-Ous6pOGi3Ma7eq1;FU-ctCgf0SF!2 z`cGO25tXWG{}Sm0xjzob*$BQ6F{vt~SzYtw#s=5o0Bv{?K{>~;E+&BrzTkJ)(e=2o zNG=Kbo<)Jkr9)nYq{F-`z~7IrZqW;N>Xa805TS5KWZ#voQ4hu~MP=sB2*d)Cx}dq^ zH|52qZa+vz2p|Mcv~g36&&-7no=6RbDmVM~+f2RUa7Ec2uFDze#|zXt(LcAI2rHX< zeE~W%-?jA<4e+TUf2d%@6LCv|n%fBqbWCNck0aByW5<>Je{bDnd8IJJT%9)Vm_=`>;W)vh+T#bc>Y?N%x~c0JWCsmvo7~nTKsU>XOXetkEmLc z>q5*qs;85rblkb!s=w6$4!Q0-J^Z6_HkXsG+0?Ap9M&xELTpB4O8`8o{|pW!;x?0_ zKfhflaN#is&BeMXQfv>!x4JT*J9~q*z{VJ4!|YM%92Z~o+nuqx>pAgTKjW}>T+VNL z(|zc>&HL`|l()+<0p@ihE%=F7S?Mx~AhuMKYQ>Vt>f6<*a3?2GvQCp~IAv><+<3=1 zj!(lCRFEIBE^(`!YC?&K>ssl&3tF44nbKZ5bE+ma!FF0Aqijh1ds|anuk;qE}_byXg-&V{xJ` zOWFAP{fM&^3~*PS{rVfk%BSbvK`kD!W#8ourLecHbB+BF5KP2|bC@F|id3b;Bh zz)tgQwq(U>bL|e0A3fb9?=w1)ncnF+kSh?ejXRD#Dzh6 zkYA#Zck1L1@kss^sgf3Q;^WEJL?BFZ(oATbt6HfhI@pzy&SWwxaiiN?bFGRCNg+?I zCuMsy)+k2$6ACov=lpA`S@b8-w3SUCch=Xp%Ca@67n*z$3{9G=zm-6!J^LuzXs*xr zGN1?6^nWUlZJDxL7Q)BNM?RrL2ZAO&{xqmfJMr{(2w#sP*fzggFWP6}_n#C#ojkqR z;{aPoN>?WLEEoBex0&yUoKw_Lf_@zk_B=9>w~{^n_`xHN{TqkPCq-q1x)#@{rDWoJ zgJkmVu-{(fC8kLiYg7v6831;T`(~r$2sU_Po*?SaPo4I1Y#aI)k#n4WN_nh3TT_VZx4^BBp zSm%XuO0(7dPr9lia?bc9Vh7T}0x*1pc z<(c?th2#yYn(pn+6?j&a4_con7rv5dlpoK$8(##;!u;tLglF@4k8OrY^zaMDi~m8c zJjuR)vF1tm!`6(^+stS0doIR{1dWAy(5pAEDS$qf)>q|P3Z%3?CXBc{R6OgA$rg&# z{3Prs>~^ty+Mw|1GaG^A^YFGrzGDC6bWvJQAgCH7BX4@={?U_sGhM8bE{8^x)Srfj zOjSz~{i2XaUHC-q2;1{3a7eaN1;Mzeg`)GuZArF^6R)*AGN+9p91sM#^zF_dy0Utg z>ylztG2u9Hx58w}f8_2!SP#?5&CyAXS9bicEMqq+rHrOO9$yjZlAOf(5O%tA8RHAQ zC(PNmHqxbBZLs=wDp}iJU>9g9I zhOBiiZ^vxb8m{Vv;%moxTdf`CHN>YaW;vvInG{5~WyO{$$9ZFt2ZEj~#i$^1FO)1n zI}woeU!}^fwCm)h7>dyj= zb;94oRSTNKuzSj;Z@w__9v$`xGH;g^{K?sc+-D|AtnYxaO1PPWRoDU21)L2I>7pI+ zp1+g_g@5|bW2{}+L*hEsfB^v)EwIM!)-Z}<3(2L0r?!0<(rZhv0LGwRqM=R%CQe-_ zC_OgYFaZU9V(oinetn$d{QMMcyshOC;dR7ifA7Io*tG0gCAPw@I8YT8y`WPXTM|3% zu#%n@S;BZkv3MsmnCI-C=`pr!aZ5w8$3g8Lk9%fr?bf|yza-KaF>Cu~q;2`#6Sa-F1HEr~1#gOz!?GL?&wJEsM>rii z+gGlft^AlIE+{N6ehWjv@QFY^z{b#O%GJ-#K8uk{TYb}|8mx-X5;;8VS-0BbOi4&| zGzL}~YV7@_irrn9i-z0RT7ie3TXljZ<2|8Zx`j!*4sxC4GUF?5D6c##-Z63buT391 zOi(T4QR@nTRAGFooi+@9@`)}B*8K#?KD=Jp(_wjWG|Uc_h_w|ch`pbFphr&nXbR8A zM@5BB_q0Fi;7P`q=7?c^nvHsqhzRQe=Y8N856zlfOPU}#K_RnL zleU82Zimrjk77<{OU$4LlR(SCGHkIq2l!RLr^#Aj&ezjvxN#R>HCN*zr6Ec%g68zx z4r7~x`82$D#MWt`-e5_x%-n}I`s^{ZvlD+_bk4Vh0p{4<9%dfJysGqsa?Sdn ztNz42YxPHYw{EGgwm&N+4fDOyVMwtBbWUTQIhnH$voiek5l^i;pEg`2tL+=!PsmfF zz9nW*O-8n^m%Cyz$fa0o3BF4{vrR&WOwWf$rosAOV&axhQO`~Hu#Hnu(99Nb(t&#x zR8|`2^qcH3iPxh{Uf($>_FrrbHiU(j?2-32&9U91()z((zo+}@$!4PO_`da6YC#4f zNsbH;N^A<&nKjswuvi!<$10X|_{;I*aLUU9jw4ivA%{W?@Z1w?K}29HTypW(|CMGL zq{Bw{Mk2DAPt}VT#MiYxEzhJvYaIXG1*{YK`FL$M`wW&IqSqy%W&ZoDUE z2FGViPG`^kG6-L+wI;t#P9gCpBj%Mgzd)7UFi($I56fEd&*wRSG)vRKn=7|VXTfe& zlXd}T)gS9V=9CFazr`7q;#ni;$-CA?Zjca=3JV<=vt?j;>vIjmMgM(UQvNkJ+yo z>;my=+bDm`J%7HFpAU)4ly9n(_qV_f5S64Y7V>B{^t(<=TaibS7?6Qgbk}vFZ>Q>| z1JH?nTSL}kED6-%XY%tvQ(I5i+0IZ-%w$qxYzDC>&R$g0Cri`s`|tOAgL}imDjh_V zA{jZ=@uft2!GZOd{Ny=n7P0YV8N*1=rrw(l&nE|)C*GxmT$6)-&to>^64-Ch z8waG^Mqi1oqU+kvMTBOE928{LNFNoUu0RY}7g;qL*o)=3LhC|rzT5q>lkW?%PE@^m z;C&5KgBvlm>iCy#8hE5$-!tfIuol`!^Ou~=&&EbE;79YX8CrrTPJVj!a;zuW@!*^z zDyz^Rr}Ba}VCvCv!RVN1WAr10=gm!@oam6f`2_DyzfhL0#Yf^{!E2CDr2^ol%K-AK zmy^lUq01KtLIgm8rC*fY4Zc>mwBHGZezj(v;p>ATrUY{7uS<7BOTi8qC@M=HlgZOc zr`bG&6x?A*BpQM*UO(&~OYoQ6p~obVjmdh9f$rUY7B6i4&g7#P^tPODcS!ILA3==B zsO#q{L^Tsl@$cS{j(~mYMxw5y>dv#aH?EUOMK?3p+ITQrf>#&3a-2p}yCnA*oN#_y z9_2YTZ8Gg}A-MmM;YdxXhFhhl1Fk=k7tcx*rm zcO~=Or&OVr(9Q_?ia7h-2NC_K)n1eoojV^Je)!fuPp;V?6eswsW(4JkgT2MQ#J 
zPvJ&ry)(72#U;>TOy+G*`r;|)Ew$lnnf6qem3lFg@dTunS5ap|h?k(svnmaqxO zk-L`anY(E)a`c=7L6<@G~Ll8Tc3=$JFkc;05Suz2_8AMe}??WwyD$vGU$ zauBBRO?TnT9HLEmm50km$G8=X;&PsVm>I#r=Q;Kh7%|ruabMB}WCYRup_f0DKe{fNKZl81^7<5w|7waaYR}*JWH7k(3%PjzaNs{f z-ucxqmnMxL0-r9(re|jbuFhDMU5}ZupJ95vpc|xcoI6KX%lyruX(Ms{k?ZJ3?N4my zAB_*1sc*#nyogF@%}2uPOsC2oy|{5}sOqK)T5HR?^_8iwTxt;OufIC5w!~xeP|~OHTjoH|!}sM$ zQoJ|dE-6NBSg!-Y%WC9(y@ikS-z>_~ZJMMay2R zY-Q&X_lTMpvn~M(!N->A-E|QvJj>c<1wsswE(K|UlP`zfYH!J%>Ks#*_F|}Y3BTjH zK3lDI>K)&X0;}<&QX*rUih1B@Mrn_>=+f-c!>Er721^zQUuuh1!bO+YzA9<)1FIIs{u%onVyW8_<9dcsU>D_g#y`s+j^ImnA1WR} z_SX?NGGia!Zg!pM%VGw^ub2lYecg(T#KBxYwps*h4I9Lqif{F!D{RVby<@!d*1Q>m z5kR<7Llh|=Bq%4K4t8Az;ZZWnyA{~~y`gsNCrHJmLR2>^oa{0^Mw-vE@Y({t5o;AFS-KR#KGSNP% zhZbrWDq44C#gW>!Oa=_3`7wpCiD>+wyUVHe@r14+8`u~<9ozfmsC-GZf|W4}!pg?> z(USrXE}NmuFmW+nddud^C)w)Y-PzU0&Sjh@f>XhQ-~eb%Zfel{SgK`nmp(F?qSCag z)?76|z9vRff%HoSOJPf)Rn|F`OJ|aCxQqncP%(paz-OY4VCZ7fG~>DH{-&ZtUqGoK z84Hu9Xaw={^Qt5op?5WjQBnku6jHe^BkFv9`hm4tTu+DQRn5o3#+uQZ_k|BY`5pbO z1=9UXHz=o&rLr+L`aK;QGxZt~ZvGdit(u)5UiL&j!?@x?uXR>SXVHWKq$e;pd`Hr< zS$x`l$=n|+G1s^HwcAGDX-!H{bGkPYS!{ZyWfXJ-B!xwT_(7-CI>CJ6kSLCCAOF85 zJUjxA9c-DqAE)EBe{1k!xD`~l3;$s$p%%cx))mYB{bsXg(>NekX3 z3PARc*mi;;eRriXhx2n?m7@rBb3Mve8H**ci9CfOW|#!x87+^vmmp+zy=*hA3&FaN z8sZ1VQ>7VlJpl__iG^QbuFHA1+`sz!Ak*t!Z<-aN$?8qnU5|Ionns#ojr@;dkbm`W zFSGes+03gJ)KlBu3(Sa~68P>^~mQ<54( zB5a&ca$8tlMsz839qZB?kjgZYH-9bArHXmDoA4n`V%vlvlL%Q;?p~I)@~v7&`CT>F z00$*f;I!d8kr}G^&Et%tYzwgIR-oeWB%}Y&%*kzZ=xou9#2Jus#TPXG>tj+|i-kN} zs7km(_{VImCPKqrh&uEcb7&;)^q%^I0PkPZ1w{|#lj`tpd#{e7)LCTU`SqHyc{ z0Zp=Ld5(mp>rkGy)BM8@5UZ4UMjJb;lqO0MXbeFq!KXMJdjzbP${g`G~t zt|qp=!R>LY*j(=&#)7sDfS3(?#Cr7S`foClK*{FAD{D^;|B)(Co*uSJ3PC^V7l0-j z3mW2%i;LWnUNZ0AD=N?`pgR&k~KNPapZmN(9z74sBu0JzEU3Iusj z(S@pdTY$CH&;{5S1G8)z_DY3^LUmD0pf3=WfTHT@mC-0TqeP#_&8J{%X9T}{*cBSN^M1jF{-`#;NtckXhVlA=ai$8SxFr0twDq@KK zX6t3b#>(%CIqkVo28&iPpJkZG>8H6Ka*F#`5mdG{e~o1FL}KLbHOuHjv6#=qdWr40$M`bR1qsJ3c-@Y zbYH4Y;gFs}Zyf37MKH=*u^}&hRxvY=ST1?anHjWwaNA%zsPF{sNJFbc^SgU%OYJ8S zIrbWP8d}~D14Y=!*HrKk4rdusz~MS1?8QLEUCG;FrN0G?H?-d|-e-uxcc6GpBMBx=2W-cg`2@{QMneV(sXpn&y(RkdIku~^kp~hI8Oy%|Sw)xm1PSTstBSnL zTVJz-T+G0CHq*mwiIL3kp8+n@&>AfExP=#2{fkR`HT!#u-Ay$$U7wn+Yb=nzsvLJ+ zuP{U=uk_sX9837Nn)rvS_|kePpUAY^&djVs`3U^q1`zB=eG>hDuCV|{ zy&#=CM1D^x4#-2GaAX=!((eFKSb!?y%q#{eNQY}SOL!_+bbxk$7S5CUej?T zN-5%AJUBRrmgnUP42I_9aGADyLJ^I2YG5#U%?LAkgPendL$}sU0#RofF*r1Y{@)j6 z!WkJDWQa*fSR^GSPpTrG4b?Rh{OsueZ_%>hObGn{WgpR$8fgBx@c$VOHUuR0ycDE9 zn>(?^$D}GI3=IMC%4V8t1kp$<+3gyj5mjhfH6wZr^+~;4*h7tX&D4kkbZq$+?zW2Y z)ic~Hy;*L)tftX)|FC`I&IgEf0obyQXWpekv}+$O*#>a`yPqoXe^!@$0ya);bX5sk zTasT9dAAv_rm!7Y6Zx9uB>X`p*C6I5{8+yGA-=n%1!nLmp6hlhMI!nOy1eBbAM|uG zJ`gCq`7ooPc%Kj37KtWjc4L2pslGH4Q3Syb8J}jnu1@_J`K3rzR?6nXm=^EQ9V0yi z9!#4w3}QQdqF~p;6g+vm+Ou1+QUBwX!pDA?)$i6)aI@6NTf*{b_2NJc=%`DXTu3O`+bl!$ zNfqWJ_b0HoKKF7oY<1uDVol8WGGL9IPCtPZ(LHDbRC{sOpfIOZuWKh|XkIxI3u4{n z={{ZzIu4=dHv(;_-?`kpXPkW>H>gjfviz!Hjhbb7_u0AUP)HL8rW|TBFJxxCu7#L! 
zOO372?;^OV4qX9#@U@!e6lYq(R}T0|q#biH^20F#N1W3~d{8PU1*#n8>4OB+ACOU$ z-<=q!v9`mSpIBZwNE4pP!85jP7m;G*c9HgDQy|*=*eKn$DX@Nq=6sZ|RC+6+YxDK_$JMeD zn{uDXBU$d5owgjtD>P4#-)zwuZ}VQ+L;G6xdM0Byo`88o*%lGe_Dj`Pm|G*$g+i%8 z>t5}_R;WCl&sl}t&y{(p@F%Y~!ke1cv0& zr@cwlP(QF98+AkOH_?+X189+-afL2(JLY0R%tr2s=Z`;D50488iCCuPX%@&(QMAys z%L`#Dw~{-(WUuMX>|Q~`!G6-vpYO2sUq5IL07kiA#w3v0dv(_|dT^CrNx%6TOaRUC zGwD% z493I$hVlS%`AW6&{mbHa<7W*bB6?&l-k16KEV!UNw@|16qp>)t2u7|u)C$vj#C>1> zD9riCQa4-tuhlDSPEJdGB}pdvx%k79J4eZ`Lq?Se9oc2W%_?$vZeQc#9~zA0-JI@+ zT8uCEG0o_Z^2a4VU)$^-Gk_-WqcOt)pxvX-tkXL((9|ae_t|U4To+elND9gv@;=O1 zi($S@lHB0kX2Mep?!9#J{%ghuT5^%gC-BJWH+kOW5s6)XnNOeNgw|w3xeVr|^7RV# zGn!hY0tSu)4%Ijeizxk_*-L*1(!aFpdRt}}@zQa)+{u$>_&I;6a`)r>yrSJPYzAkV z_$+(zxo;}R#!gY^4-i>jLIm1wyUN|)&EHPfI4h{A|H$AesZQ0)dIyz}pi<3LCZFfP zR@T(a;5KcC9UvN6gsyM23$*8xPQ)1yu7(_xf5LhUVjR%fY{S;YOxL^ zn&cj6`$j6yW-vC1#iYIaf?rrlNxq!q_gZ&6hHtKLwXk+oTn#j@=;iw({;iAC)~?OE z!k4r)G5f~i;$r14|4Yz76dp0T51MX3x7epHZV`(^r)j}3VivECcdYOuX5H$KO~}cn z;STrI=F7o7*>+3zNKt6}?N*VrPSm5w9dW!Do2;52d+t4 zIM}X*`m3bM=Ng-a)qPe(P$9BaqR^EfX!?n{xTHi?RyLfOl~pycb1`lJoxh$uuy&-t zJBD7aVv&awX@MKka!2E^0oQ!a1i&L-tCed5u2&wnZy0-)YtcF@y%c?FaPApGSs%D4 zOK86(1|QwK$d`1UDh)KAHB}3A))R*?XZ{Fjl&2xs&(C9qC!b zW;AuKqb!N)0t-C=M|4ub(J82N$V|miQp^o*Q9<~+zv^0)&4VD9Z?4r?CN{hzN_XE= zS;U@vYdO3{siOHVZs=%6_t9N7ORS8>GL8BxU1)-LbS0Nj-hKP?kJM4?{3W=iYAObU z|3)+uM0_HOuF`{O@5tZX<%brrA{P4JZOF5BF*oEvO;x`kC(>v=mgb2y$t544q6_`2 z(s3L)Tkh-v^e^+fBvtm<1}Vew7@7KU>lOL(a-i0E9PG--6 z3#pu6+U%8(%kn3E%p2Kkj_s*4fEUGaJ;EAGkUxd|l_l%afla3yx1aO!rc|@m>z%P{ zg*i=awxDms>yLY$K8=~%?y7@sYn4Kc$YJlS!5anWtB*2u$mf~mcP8TUqt}iZA5taq zpPFE;PkVR6i;$5YmwsBo-+MrlH`yZFNTaTzqp^PB^nzn^=Yd9z$cp$1(Hh9Bje>cy`Zle-M{w6h>&TH=Th$(k)P1h{SlFGA3C^7 zh%eI+VYi3E_eZ&FG>Yl21VR$aag6v<{3){oR3c{LBz7$O%}t(RIq`NON~W+qamLkY zgwN|k5WeVAu?TfBEhf30HxJqs$>@G3k6JU+@tSKZ{XpDzG^;~QWOn8?P)S!$I!CFa z*mO;g#mW1;(qBwQv0l~q`oy%jFxFar(;d{cuC$1^&L7kywA%_)E#c`(!v+a;-d~DQ zI>K|Yo68CJi{ApFnV72sLozl%?eTA)z znl#I0gv5S%18ow9rmsO%M!qxr2dHeBi*~3%JkuVMOcZb{rudwTigAG%!k7eLFZOE5 zU=ER)M|WLjjUY)?8tJq~kg!JDm+y=7cQW2I?!7|y^qd&=!cBe49W88pO>z{kKlGBoa(dw(P1m{wrOuFaa#CUz1ZPD~6F zY%Z!=Ovt2Lzeu`=+-M@zqQHd8)QelcK_G5g#Bw< zw5274G7sE&b%<%oMy@P;71}6_^3$BD3PpgVj>-0WwUF1F{DrYzD8}Yo?q|!YijL@J zo@n&hIr-K!ZUC5B*sx@>5mnLUZ8R+W_(D+i%jg?D4;9nxHkSofCE&Uzz0{74*PhQ3 z)#S?}r{>I>oqIJV{%~Y?C?7$`5JLY;oJp4^Im_8z=X!x0$fovQh-c;8_nM$AV!pHZ0;<|_c<+_@2d|pLI=t-= z8z;2lG^ga3QYwc;=W}YJbq1yPKZIO2I^EQ}=<4LD#dhMx{tY_paX~((-@XYLeft0f zuXG|jADvgbyWUoo3=5;_R~cH;8W|bs)>|pq&sLWhDL80Xgn5OHmq!W~6{mpF9XZjVm8SP+3WaOyy zesfu6iKOWJmh^(bTy?&tpPr0tGYSz*R5X5NGe+31{9eH0mI*&@x8Kr$<&! 
z&JhmbiEEJ8o+D>wjipM^Ek38y$6`4wderjlY9=+gW>=P9o%!5*z$S5bFZ0nHD6flm zl-GTPP2&iIiO=Z z1*aySou)kt5xEX83qvpsL3$i%`r<|I)#OrFF zI`4LMe8{8aX9Vz<K1n0<~}Oiau;`0>W>=IjU0 zSJ{s^4UY5NP;w*%}@`vr8S z9OLoNhP|`vHhHa1 zuAu+mJda&mySw|`G&DFyT4WgUFx*l_Mz))bs+NO;=D;i;URBEJ8ICJ*lN}BF1T?w1O>+?lH6m60j@4$AmVa-)>=RJNDj6%Xa=WMl&=`R5$Bu84nO&-VRx4J+ydmF0%0 zDaMTfi#k5rbvdUHgS_5Ai-g5FPB75-OKtC@a&8_3Rr~8%?}h-Yl*b4}hkzW@1_b^M zY?F;W;%CEbFwAW{GQL0uxEI{G3_4x{Q(5;2jW^tlw$dwijM9pPKaZzOV#_t@yT=Pn za{Af4!Kvo6mwY1?IxtN-zCQg;PL62##<{47fgG?V!PJm{n0xQG8|A3^rooXIolpa2>&jwMWQmHMaiwNwjnJoIqHi3d6LW{2G5yMd{}kQ(95a^=ZgMtu8{6>23(g=-Gqdjcb$mB}jr0OCV$ zX=^Kq%#%gvpZF6iL#F$|`V{}=g^dN6c7L8g6-YnZCkR8^qMxpE+T2TX(cVU8Cs0|o z3gM2Wuagc%zKcnT>LxKTASRdKU&UQM4mK=-P_PS+ey z-->DM{glfhCRgKJUQY4K1qVN#Hn9S((7mRD=t%ATvy_)>7)vF=YOl2Y>Lw`a6(aiG zCgaqDjOP!nI%=*l=jOOgvy-Sd{@IBe#)UiJ## zk4FY|!ZABup+cx}=PoVaP-IxYEtv2bdSf9STJ-GGsaCyHunu=Gy9p-s|F{AbA zdL>{|85Y6ov?w65U)6|0 zK?WNlH}AC5D4^nK3bkNry5B^B5Mwhp0Zd>-gMxF{<(!0Lc(<^KNO$J%=oc{pcRg#Ejv+dD z&sd#tz{-YZ_AI0TJoIl*ZWbD*9(+&dq}J6S;U@sqkJ@5xEc>hqA`Ze(F;zz(G>IPa zdXOf^OIAuWDp5RYp)qj$$e$_YZvY7CE~2f$%ctZm-Kmy`Cm2r2(th7gThN*aI5ialg4^p$4c4;qTL=RS%c+GiJPffl88oD zEO`-tlJZi?;EiMlhJBy}F-yKRg zTK(?-Nc_@^v>ByCjV{(gZ0I;apr{9JWdFZz`8g$ce^pH{mWjaNbAYpx$m9w-W~gWK z4G9R!l?b@4%Ok$2xR~IGK4Tbx<5_|wLfJZI$`*27FOp(n-gqs+!RKps(M(GI-g(S+ zn^`4x4DuKO+AQ4(pzX~cLM)s3Loa^+{%u?B>lm(7j)u?6cUryo6NTGwTb94FS(U`g zR@iW)_6?D-(WU^)kX_dYJ12gzPa++j_^g$h`c+#9{psbpBp9|TESo@^F}U3+Fe)%- zTN!SaGWvxR$ka=}F+KsG-z%EpxM~_38s<>?((T;6u9!Vm-TtY^(32ob3Ljj!K*xReBdf zvfHk!F&Du;NgeP!aTlf7shmR!K}-DwL#`LOj;7%v)Qo7qbYbHk zX~AB|e#O)og-`R!Jn-}*?{@n97eP^ZiP9E4egY_o1_j@Co=+ive{frXI1=#M7*bb zq6d`9X?~-Rnn{h@mI+NZ4U#sfVBkbk?yh5#vm+$@%th=hL#rtG-99~5;0vBQtHj%j?;cQK~7)pqKi}_nD(Tg33K}l4IUr8wnwE~fFrg$J}z(1mIs3sXUQ1j zF^s=Rm*iM~%_RH~@>RlCl;T`83$GeruYy$6pBzsTx$x!Kk`Bmb>Y^9jow33j=Vj8Y zEMqaBc=GCZ=wI0!Z09LBt_@}!S;MdZPSsnjtAv5YLdS!FN7ah-$h$C8iob=gtN$v` ztG+QT3YogZQvqqz(mJAA!}2m66Wy7$=J{CY@cfOe#wn!cm~5CqwSL-*VGii-rbX3F zMB*dzVF{qhw|%V1qqgH#Vi32v>oO%@%SJ^hd$BmCASL@jk%A2%A_xT!bhW#F^ly{& zYd_b`g440-uWA~s)F{8&y&TKmH&yT+DW~tv*SavP1BTDGerO20!$drqo|BG)SwNj~ zB@;pkpF6L-l6dPhpKLUJc~5QN)t!$dGYd}_xg+x{AgLz>&(g!V`huDfNbD%Qn zW5>!XzB>|@S%H8-dd!#hXfR=MR>jdt z;sR8eACZZDy2<(Um~1XLT;8ZidxUBFbXWOc(G?t2{dp=d_)P?Din?~ZPSDaP48HNb z=(pP88pBip1GlwRNq*-0{!c^1A!ew%&NX-a{$~7B7;Ou-?I}UZLXAbAlHNweFZZ(k z2__^Jza;7iPGW5g{0wwRNeoA71Cro%^4co@&a4Q18#oSxa{G4!>1raMYe!*Z;X)~k z>xr1>A9}kyzY@wVbbAbniyG4os&G8|>F12auQ7&!OF=IYaFymTTS1d%AqJrN!`1C) z5S12-`{|6=zflSa9DPb*)HLAb;q#Y-$awU7k=8@YxC7gr?d}F{>g7--9t*m}`x7G% zsTN8$2gzL&{637p=25dlT{cr1`{J)~%OjJoA-IFGyC&*UiCiYL8AALczszFZOaPZc z3z%M(mbeYzLRt2D<;EChln3mBKVS>dM`#6!YKk^D*h}4YDAgfDcqX`)o&ma8>0t4D zOX|A!m4oSAe3Z72TUk6my_)n#SN2ZBgrA3onZ%y!9=I+0X;d0NRno*fD3!8r{)o6T z&{AK>3%(4m(RP+uMOPMbAd#yJtFeS#_8+HP2-nHyymx%d8D)WKHjpJRtI^1xMKYaa zIbr7=)TpKCPfNcLdRRNNA>GzsC&q#`wmY(IQCZiu0J^n)Lk(C@WJKcopPk@(@9+e` zL2pxSm}j&3e6z4+g->f=qtE?*?g$H}PXma)iD4)G+l6z{R6X&sZOsW9ze29}lqitO^IGd@)l_I|_)e&^Gm4CDMO z74bWpyGM>%;~~txI&E2*6(tSfrnM?HFd^bJ-s|-laMxK4u2Yb6nNriEb+DLdus$&s zFy~RX+*|TF)YM{C4X13BRk|18+B4qYP2^fkOVhED-u@rV_&lf{y1tc0=@t=@`~E3W z0C5me`2L2%sFDBmg2Hp&2S_K69^kiWY+iCRy8PKF?I|B1E91FBRcbqw=6pD~Zat(> z7s4^J+QPV6?Z?RE0BqfZIo0s=7uQVW>KG>A9Mgc;6rIsr6d{?y_d%rfTw zRX;@GpWdeUz)Ub5Vy-&=z~^0OWmg=*SR2kg0GH?U@M7ic6vmf(i!Nv;ckO&grgSFK zs#4ST?IIR*l8404T;*)+8k`hOQrj}v7FDyXy~Qf76(J9Eq zb4Bo7lZyziHJFv2`F)3I;iG0Bur7NC!G+W4Qa(FaFQy)CZ!z(YP_6DJ-CY#!xGik zyCVZ6thLrT>8*b2Vk6^Iy0eEK*R*&Yv7R6PtB9S?AnSk*a@*hL0eS?e!DH6$SIPmv zBbpJZDNea0qTVV}v}x&J5+p-xP;q|Md03&L8(Gt@tUBtDd*QQ$4tz_tryO>!E|f@M 
zy-8+0KtGx5LTFz)7)yx&)wcfvWs^gRtlH3yBFgH>pby z!)k<>!6j;9iN%08d{*s?biBZyyIIE{vzD=W796|Szf-AF(|aU*58w2shs|CQKV+wf zWI>^LB^{52NXF|e{K*&Xxw8`q4PhKHVtBI-`~KF44c%hwr$<3O>0ynU!nzUS=AhC= zMzdo~$5*e0i8Ztj(e8c?BlI<7nFLTA$5L%0$-}tA-317lNFdY2%%A{!_bC5fG$y#d zWyA-6MT;bCEaPp2s8dbQ1d|YWECejk#kTMBuRAPx262CvC1<9xsq7+jd9Qvp1j9iy z*ugX0&uWl3BtnSx1k2!HcAJBul}3H0IiCulam)t<+Wy7Xz9U20N8~Ha=$vhv7u_!@ z?-IPC7#0G_@+bdL<*4!fIbh>+Rdj1ID9s59gL}8WFD{b&occn9-WX40CrCs!85)q9 zE~lzaV||Qy@R!;HTs?c52fAi5(df=kJG}uNGoZ!?f7?t<5Yp8r$U}A6zO1k|Hl?uw z<#a|ROix?h24cmw(*?2tvIl-gc;F)5JfH@Ky8cw4ZP;?PAoO$yC@0=!I{-WE0x&vK%KG`%falms_H4BIk2}m@olE2ffB)up}A2Vc8j4{=m z#HcEgtTYpsg;*Ql9ZY*#2TQoFR(FRNwI{XLYzMRQovrF)NSR>we|s`A66e#QY8}#_ zeK2L?+kF{Sdn6Vz(#A%DpIIu`CfxjC9>W+DhkgrBt6mTac*>bPJe`p)x8yNC-wjju z5n(w4{h;(4#&uFwElvqVRS{;m2)y!KE>)@ByW3v!IClLL`t6X%RUXkdWmY=!0wy%0 z0%+*)&&k!$*d25sKodQ}MCZjvLvcsz5SRa{t=&Tq;+?*#6IJ(UB(*??Vg$-k`r|L; zs4>qp{by|jm)#I6^qun;v#c6k7{B0rnL(La-evu}ohrE3kHyk`kvLMmb+l>vD_R84 z#M;7w!4cD8vaJG}3ngC49Xe<2ZIELUnGjSRNjdJ6l z4K?OLrP1wW|5Q&`cJNvbzVC!>nYc9TWboA7OVkFrgeuvJ#9t{QEpW~aEGA*QL9#(= zyQ2@%i8g2c#w`gOux2;X8?Kytd6qT8w~{Tv1T1alhM7VWl9}9B@lz6@#n0AwMmEfZ z?r$5!;b2|k+?u8A(&J0fm*<)LUQgw}S!VDouNs`u2sHx+Q*8J)lf!382I0|JK}*h! zD`-ftc0UU;;hhL|I((_|$-kYV%TuX@1O)`592;*f;vVPA2`A%xjZ~2x2 zNu6B(9F5IFLQcTR?VhJ%xg2D`d9UlGnJH7P^*tdXPci7AjN3tTSAMLSei4mwVRCJZ zZzM56kZ%pHr==>yp&L@N1e_4ZTTITszOUr|BW){A0f$T{V-v#y=;M^d?Q@3RL;zG{ zo!bGIVuubr1e=Y4Bc7%uB@2~3K0u&heC5a=0vRK1J>o*qA|dx(D-;Ab%+N;IB%mJF2FGA|_`6-H^pNdNKoY=M@ocP--F8Xm`Gq6T7RL zh;wxPQ?K!U{FiJ#;;A+;j9W&#$<-2?n>W?-k)stF8vcKS-K{@e>>GjQ8wD5XUnFkD8fSHneE@f0*-{eqhvMrRWra zvxN30L8V4J3+!p^7jDvR15~AMOpX+@*iB0;C=MRj{pnL1jAhF7^>K?b6Yna|<^Av9 zC8-@^p=EP#8U!*;KiNB${-=@<>Bspa>y=}TXs~f#_L?7sL77-s6uzuBkf-YhA9fXfs+?z!@_&aFCRfNLB4X6;xy#sSH<__u z$YSu72vp13W3M!jj9sB^FuJdj;k)ry#=_wniGA554VQ6Dk%p2v)mxbcx8hM|xx7)` z$%p9W_ynEbGH`FW4b(*5P=1n{3d(~zw0B8gY+Gc)| z`}w_|GG9}PnI3&}Qjxh#uFK4jVT)F(Kn(Ah7pT^=Dk|^QxGJ3gVkqn{L!ATs`i z9tp1IPoJ%6-Tch0*ebx5jXr_q88`t_I~%1PSFG>^!Xj?6HntIy@Z^*f#%D$(^%-JS zHpxk0iB7@vr%VVOgmj$EYsr16eUzEcTMU&$ZvZ>A8%5+%MS7{yV`Kk~i9XmNO86o} z^MLiCUGZnaX&9|GGvQyxN<10cKyqF8UVqy?>AN3Jjf%?mtW9hStSbXOX&D8bmQkYl ztB3ka7bg^iQ*iiO6=mh*!9n@hBqQE!K3P?JVwb#4K-Sh%!B_fd$=5}Nxgg(D_* zIG<5rsif5v3HyKKm*TTS_+*e_{H6;J zXw-E*%Ho+*66bOqJ#drkQw5_8Pias%{~YKZOZh;{>S#?njlT6`akkt0S*<#pFp{** zyxY1+@MmB8&zF_^zs#NcM03jrsnK5;*@T19&aw2}YONAUxFRC;q!QyI3upxwJm>T7 zw&>8PEu`v{(0Z^|hFJgjYc}S~ti|8aTMzvCt5xzThykdCtr9C8w#apBHP(~~1{2C) zK%S|hn?cr7;fd7?pUZ_DoZ)BO1=I@#M>8op=+`{szRFvz5ulrJ?Ix3Npsj5+t1Qu_ zXjF%mKhwD7hWsjBN9$c0$-^%{N#Y0@M0 zj&r(aoJR=w^g$jJTSLG06x(~?YW&|Z6)=sg_?7NlnlW-Nrnf+>=bSW2F6L zYj*6l!0bl?he|A8*Q!9Cpo_b{D~nQWEL|&q9v-lgp zD|FaE+4wy=bCjFT8e83}LX<8VCnpV&Z)8V%<2avvaNcuo>M|IuGDid9S!3s?-QPHu zmu%DC^zd>WMf)v3W14foYaOWgs!t&cD@;&R_;PfwlV!?q>{B%kMy3_O7@#Pner@iB zhMdV|Ca2`4cnqFdEw8lQT+n6tlW-bYg@v-|1!ubYyb!#K=@d~M z%Y)b0`+>$h>Z7Z(mf2@RZ-wQW#|VFDVai9mvFlDs^FlecScqDjAvad#*$sq zhaYQ_EU)7@O0m%QV&qImz^j$%Y}SX>HYmPTM112+@VNr3!pMjw2Zx%=pas<4tS_Z# z5`-DJO;@q=wfUC(9dM4Bulbi_?Ilqnic+ccfLEl=LymD`3+5>Pgy7<%5_Lr<{#3HM z)G`nD*QW=hI~m(v<|fXEG&aCM^RmVJvyf-uezXYIoR zA~X1l)Q>2hv%|>0=swtzR%8-irV(D2lp5PXo%>8$KyBhEs&c#lC)g_pqmf60HmD=9 z5rE{b0^_cg$=4&>EUG^L6!XhnGDi4+=BHRGN)~86PntNT z-BP!D5aPO4+Ztv@C}1v71p7dH?c7UN86>AL$mIr^YGeYPV(arQzEBbA(<%{ohE&9F z-YC|IJcDll)PLC9pkWKY#`+hgVle!-sp9Xw7jLdOc`x7a{TzD%iK#bwLjZ4Y?t1Ie z?6RkUU~BXMK8C7it1G%DO7>;i@3r3S1CWhua}Gbsg{Kj!ejnXF10?#mhC&VQI)LYt zff;TVmzN3cUuaXZH2L_Gong#p8sW0QU!|``qrdUzXIJ65+e?m^OX0Ob3?L0n6Y%&M zh0lk4*BJ@;M&KTSlnur0L=#eua?C3{D~)K^?GJKP3kC!LDdy~L<&vUS6x}2ctzQQw 
zjmS;EI3A#KN?cU4R8#30d5qr9M8a17eL>^Ttb&=;3(W$P~4*BqtmBK%;hh=j|S zSY414F=^?N z%`EOdP!atvL3q6a)neTzuTHJbl{r&)4Za`GY~@b3Lt{VObmnjT`#}rRKh0%sW(6bR zxTIF!o7-21>8@F}LMIE3Qt8v^0AdY#SWQoVC8SYY_eQ<>F|+-Zg9-A;5723Vc-U9fb{Dvl*-6IZ$|wfovlbJ#4)^mr znhIM4;Wt8yL*Fo9uwpE6;Xck?{U0{yLPfh|I+CuEx+{3INQmG0JDX<9H5bg8HJ{Ro zhWN8pHE}UxjDjn~%Nh(JsItkkmcWd-ob0X-&eZI+%NC?tKaYFq`Bc~BmpKQCYS6pS zH6uyXYHIK^ix!Qp5=z|ooFnZdD&zCr&AYm#R)kPC2=Z?tVHofe!&geU6yq;ez5hM0 z-#@zXg(=ZQ{7Ku{TR;@i4OhI5kkJ5mL{NrAc8>l2iwxl)KB-r|FRQ%&P4;lD3FUC9 z(4%(!0yXkj{_DtQkD(}R-oOfLjZmShwK?K~hboFM)fJ@`wl|EGUsRovRjlz|? z7}e&liQZ4fUx6v?W~d39Fed?*9NFHtvE?IE;&~Ul{VKndCmsd90WB*|w=X{Cf)Qpv zhtTS`h;q=skWpj4-1(K-9k3>H#b%ZSeXm2xIM$;$yeCqYLkqZ>d3RhLjx}Sj_|Z*v^t;BW$5$xO1vtZ6mZ^MPIL3 zv^FeRXb=Y4_h;4hS?XfG`SW98OYE|wDHIPIn_05>#h)R~l!T8lPGDrvnRG^kCE7C+ zGw#yz7d(T7oBfh>K~W?^G)i#vmiaG5=XN|(9cDUQlZZW<`7Vi^o@Assxt8Sm3u9?a z2tKVrSN87s0X7wgBx8ddU~ZICn7(UFR6Znma(&N`2vUdj>+YMVVEa)9;bY?MyhKSt z9XZ}U4{5y2olPrq4?3fr933hw3eIY`X~#Uq^&Bz(8!;X!S2h|d=R0n<-!1zH{g}eQ z#;I`51c_MG1b`nQ{0?!YFYg2U(c2|mwSz7vX+>;*5uCS=)-{@jTED0tHp;(@P4}6h z41yjRSbLdOq76Gv-6uZl!Su|UMtUP~iH)3w1KcK{AuZw~dB&hhI7GHn#B2gRQxr3L z9R=k>nR=%gO6c(W@RZyrLdHa7_0&*8h)z{uIpUU;IjB-Y=f&ugLsgR zakbWcpQo+)U zZlXNeb`&F-W8n2y0YS;t{ktD=<*?kojCSh3?awToCrXU?z#iF{Q2@F>Ag{KjE}3UQ z3;1>3o=D%uYT9!r!QBu9eS8qf{&@GNm-qZR@ux3)_6_80nw2841LY0FZ*GVUbzOe|ClD=QTJ9aJT# z{5pgw6_>#_Q6Ef*wZ12Tb0mDtX-eR6aGA8s5JQF=b86(kwj!p{Qr#RqCc;eo5oeGh zz#1ImoGcjLCDSAu?WPzb zA=$K2H%j03en@9~WAIm5LnA#h5=}*2J?*dWa?b$z$j9nF$P@@lWJhS*P11XQAoYp? z%$WG|bKY8_>Ly^CpeT2-n22CkL^mKQ3u|cz>&-JX?XBvd&43s3Gq{DeRVv4`<#jrC z-nV}-<(j6wf3W8I_upF%B7QeYX`>}k9l9oI#dbTRM+p&d9l7G<@9Te$34n3}dB39) z0oN9nhWKCUqDU(Sa9uk`37gssvDaT_kOtH%08ARb>#a0r67j7AJs1o?iEV zq%{v&1XEXpuEQnm_^j^uOK*KPa~`v_l#xw6-KciUj8$R<9dK#YLh9Z1a@;phY|UqN zB71AHtx(ZAqcNmG(^s(O)Ik9arWcwRF&@2k(xI94KP1nnwy$-WAmZuP#f3iuB``gi zc063iZp4}rbLgHY?ki;XiTY%gh=mz?{05>N-l73uZdw;^dxV^2gjd=Z#|#U2@{+=^ zc=h~2R@eNZab;**#1sG7YrY3me9cA1cEd}o^K$^cX$o%RA^Rgt@AR)dk#THD# zfQ`;NnUxQ+C6aIaT?sC_(q__TJRFL*Z#!;q>WuCQk&x9<2!q4%`1B6iJR01d(>`P9 zKg2NdbqBwOV!LWv0g*&Bl5>!Nz2zm`b%C$5tMO_gHjhF23uZ&20VJ9Lnbh(r$LujY(=JLu#VFhzzlfts zB{JALA(Axl_j}{o^EZXsxxBNe8uTV!*3kK}S7qF9rvs+$np1&& zY<%ScTeqemN|XbO0Qcy{FHVd*oKUc5%3?+bSK(&2pkm7@Q^+cKo`pHf@8@Ml`Ri#f z&3udlbVuTJoc(+qD^PI9ij`A>ogG$=B{fj-?dwNrBRACjYa64_pr7>XtD9m1hex~D4Houpff z-`G#o%xaglxEEI+t<@j{lK(6p8LjfYj}MWZ1~T|S7^6#v1XLp=EVdI6a1$P%CdlE> zQRUNu^XRFmkYy!ki+DNTh)u9$7Sz^|`jsZZliNtPp zCfLb3z=m*Bf&r2-{?(fLf-=Lgz4-1fd#P?Ib|Ks)1Hil9?h~IqNy^^}-cboZI8?pe z;%++T5gj*1{%;)OMz#G49;q;sCy`;jqkrloltmD4XyLD##pmaOv$!D6(M{ojp3|Ir z;Jt27?b2_u5*<$<7*m=&Az`QqJts@6G=FPNO9fK}C{tbP%wH0-oHNw-+s_AicTJDB zNgg7Y7`_Sya7>5 z``W`)u=SRHfafkd_7%F{Sv@@930cg2%g>ESq&c%5Bk^9vrr^~aBarm@B)Nm0j;%`s zr2nk&lTbEh{7k#26p#J!ssQ$>J*r#hY_u3}WmPAn#H^$6=o113X=Gz5n|@5E@i_!nJ;tt)dF2u3 z`*JHEiOme}5||HpUT>4g&~7Rl2K-kOs%oIrJ>4KuPP%qm1*+}Pg_SGLvTHd=x~E+@RcxD|P1@)v z-aQ;XTNsax?ij&R4nJ`gq;cqQAIy3nS7d`>)va-IP5~WdoSf7Fh#)Qm)a7-5wDH{3 ztVTDb@F@dD2L-)b5jF(RGPbNjtB9v;1(GdK6}q|AC`zbU+wFfN=*1)RQS zJ>#P|uniJNRH5;bs{x2F^`iGPV&k6nk$zKPq_L>%%b}-SRzTd``+K&c^#exqR8UeL z1ERdr)|Dz`B8XomRKN#$0G6sKf__8LeoN}%yW;*9&MJp;Oo+_zP=_BWhW#{;mI!&D z2uL?3`a+QeEQt6TQG{3ZlTsVfyf+J{0SP&-4nK`KC#exD(gYE0kg#blzVN*IPU<(` zNEW1+9qYR(}=+0XXtTV0Kb$%!5Q%a?9;yIPMG zZ+{DVtFHdF@2Q7#B>A!G=u5(Af{L@UI|h~uSB0qUvl8K{$c;l5TA6?9#eKOnKqkld zZ82tG!;7DElrf@7HgI#XN!#1C+?9CUk5&+Tym6L-&gNOCi6ZpVXi`3?X|+4TE_{8R ziJ$gShla@281X8pl6R8|r>x20uWAOd1JZhu<~pNxRcPu_*bNu0>=7ZFXkq&#W-iO7 zL{9&KWNG5wQ5F15!8y4}GS`4@kHYT1Kp7*~e{)(cr@3sw)a67BLQ^AxEx%1c z()O;lCRUAmBD@gzAjT3F;&#ohE$dc@7Bk|*t1h;up0eqA3%;;vz0V{3GK|!%e}EKt 
z^C3=fkQtCoNml#ZHrQ!Fb1i)6fN(v1M~;QNW(?s99!NKeQCI~U4!}i%G+m~>^N`c- zd}ILYfAxTroda^-1^_oGGY7~5m09>}-gYiB`&7aX@TmUrwOSDBo350D8A$=|r>3tW zDMh>DhP!BTG_C1%-oZix3xt$vaT%*`DPY=0%Q+M;FKLuvga>WquM_i*gq_~_@5EAD z)|H;hq+4_$s^GBQ{qPfbvT9;uE%mh7o82jlXt_s(1xsD;B>Lt=!NU2s+Mik2m=fcQ z^hFE@N#3LAJo!`1N`Wv*7Q=9m6~l3>VXdwhwSvkJl?LVT2A!&I0R-N_a#T8Fb4VEY z5OjE)y5Un-r#or;--XT95%iBb^8JtZOuwq$bRlpGnwr2C`D|)lR-M3^W`)P}%L%xD zSFkTZh{3-3z3pFO^)f}&$3lvo9OCEEcwA?r1FAJJ^>cP@npQvg8asq!s}qR|8|*kG zZrN#AL?Io8zK~|$JZY0m^|}0;gSSjZSHyR5+$$0rxu4_q5uT_}4x8A{OFA20|KiJHbW@txo5_lkauP1Vi0jTkU-$4m*P zwT_&VT<{F?Lg|0)K|Uh_Z!&CfZT%|WggBZEPo8z`)4t!y7oBBi$uBXldSa`+^+^`< z471+p%wCI>7SsK%3v5q%aGG_|vBTEum6lZDBD?qcUF?yLGw!Ddk0I@S_cPDMcCOdt zQtkIb#vikmmHDfG`|BZ&ks&li^g(&)ph=ifXFlEpO~lSuO=nrRQP8nfqkgQCS(~ve z31R0$taKaMBMVLaAJ8QK`65U4hQ~IH!}DivTGqfbab!HDzoyBF>$#pdQz()Nu&dC0 zhbZFk{Ad8!P8%+h>6qbX`4gfSs8)G4eaXrjT&f}v3>F$><+p%8XPS~%qtEK_hS534 z@N_5Vfmu$L$-~|YxcJ=;5Nm*1C-t1}PuaFJIjQW+_l{fGqWV3D*01;*S8n1T?f|Y& z5gjKvP@CL7(qq4l1oodN|G9|V7zGTI2FmAf7)FL~OaI6Ajr{k8Z};XU5Lnv20mTrV z$&W9&jkFH_;jAbDj64bwk92o5s&($DQ{ejmeZuCuMIM$CUUq?YZxkJ$N0jzsXd*M zm_7$_C5rWUVWug!`bevUh);#OT@(K430TMlo1R!**l6&`J)p18w_c|P zOW^s5(HF0p5Ariuc%R!vwoO7Zr3A}Aed$sF$c?W|S(8Lu%XIJOtwdnR z(DzQ8yH=#${)!YY8!Zda%|WD1ZYE#>#k#r;UkUGP=&B}3vNzJE<+7rhQb{m-3L<^% zitk5ds;IWQOrxd;30|N)xb3(kRaZ5Bff8rUs-6pNo?E+dHao^^_q}VXoZ?^r zy!WN4TF9>ylIBC%$CpL{~3Uw$sAK<9Z7 zgjq;W-_WH9K7hLDMhCS0TRyI(W*j9WmH|DjirS`c`R-@IbH8%Quiz^_k|IzQbU4W# zhWrp%F;P-+rC_r$^mr=b!u1eaCiX@n!H_f8veetJ=U){AguD zzj#K}pAs~W;Z0@b_)5!Yf)0YuJ6msy#OW)~)y@a?!N4EHf2+Xpm!wYKRN8%R~y*Q*&f`E`#Oc!PqVjk zytQq{X88F^{w+dX+*NQDZ`t z)CnJ!G)p01?|-_!L& z%^6N;D}o0(2Ir>9>g(^7piOjuUKnk}Jc_wy4e`hBSkwD`2f+}V;1~Ie(YJa*bMiAG zM;-0Sid+n8U9vnofgJR5>g;cA5U@t{pWM%T+@8Juat&Uv=%+@U2?5gf*>kd@R^1ok z1wT{lBy;Sp-;djK6bC4zJTls`2LdvcVLorTBAvbaci zp>n&xhScg5{S--Y= zqG<76fH3p4qVAQ=Y4bnAPdVXcg#YR3!1{$b+JAqhTHjt@B;~kR$HBWN%8jjVMT~l^ z!P*a!hL3v+Z*un*SVh%Fb+dkdXUd?(&ti@kyvccLSf23En2rOn<(B7e zqqxYpXKKG)ZBMOnBfZW>Zl0yhIK|0Ys*=ujW}s;jE0W|YpGFeS z>ab;EZ<5wH`;WvJ7)IL46qZ}OO{Cf-{yP@aPeYZq;ld%D*o`(5AC^y-dRB0nNqIFC z+)$Svr_6mt!=kxH&C-LPlmG3=7*g&fYb(rZ=zZ5z6g7-Vw*6wHLntM2g-R8FV!SCd z6dzMj*n;b4)aV@;}f%v&X!eFNL*eW0$>#EYu~B)g*e{$<1wUohQ%ST zf{Bj~V;)`Y)DC}oCRAqD;=TBcnwLspq^8g*gS0J|bM7z07aj%vO6YJc4D!%?+$~r) znVMKMt0~~=0a6S|P7*MI$iJEu^*E*2ikU0Ib);q*oO(+7?8igr33=xs-@SIMA4(-i z89F+!fZHHW2(5aNDynR(SVMb<`4hx|;sr0us_$zP{&7O%vc5_jy!otfx9E};dFqy; zKs_8;kRYi$?zU?0qvpxAS_pS2Ql{|CcOnu4Dp z`_GZZgcMENs-i{ zf?rozNtG1dW?x19H_m^P2iNkhG;dpt<PnF}<``+e^7P)O0? zDY9lOYY+`as>R>{ydC`vCm=JB5-6KLsY$+C3pqpF0kvg8E&xQMxt&k1)6*JFgv5rx z`yJE{V~G9=Tt&E#_75^m_}GirA6wU#fcnA97U8LVn!08?DrJF>>IVfc_Ogu^+z zuTU?sO5QKoqgaY#$thRh;NrA3=6Piq>q#MY1}0td{Qx0yDLF=Kx0)Pqzit?UMck1& zeQoQvStVokYBAI4;!>JPJ{URQTx~h23!cd8Qk1b!If6JC^bIElYOF@T^4e`7W8}Rb zFhQ$Wq4kuH7hh`Mss|U%1=*us{{-v%=jp4Y;=d_kQ|<)S^dPdrRV-+@u+t?*GLE5m zoGK6M`z}i`5f}UA)&<^w>V8ZsYE7uon88{}vc$}snRrFu-GaWM5R!1Djxpnvc8K$V zMawsoLouV9@TA=M{%sgRu*|4Z1l=fftm|f;CWjj@#<{10?l-3Fl%R-`YNhHU2dE#H zAb&y;b`R0HDWf-@dFjn~565g>sh_LJu`CFwJSIpzr|b*`vtpML0VL4s#L3_t83$1N z+y5#D&UYyYZnC}Uy3g0UuEEK{cUKFC9fUMkT*c}De+!{|6-^(j#*1Ty%04Txf|zW9 zFCD!9&7X;4$+}52SMA_31uyJa)jc2Jgpdf;-s=hC=UB1i>>q;RK3s49`sMpFGhxu? 
zJVA0AA0rhFy4kD)vK<_-9U6iKg71%JtNu}CeO+Y69LWt&mZAjG<^BGKEo3^Z2XS^z z`HzH^+g@Hf)&Ob=O^{R+^}?^E{8bb?f@x@#kQBmtK0QoOIGj zrY@V*b4GzV0(!)V5t5SP6Fz=GXrF!d*}I*Q?V@_9P>z_jcDppI8!qQWO_jLW|Fz@; zi8~Q%2apPna3w5=5<`XBXk∨tXMu#!aMvh9T5c5h);&hS)>^J1KmC`Zl7;M1zY| zSTjxql0+;+v=Auo@dpIObEInbyUq&Zi~ypA5NS*Up%Mv**J}o^IMHmp=?HbA3+xjO zYaBy`6P5-1Tt_85%DV4&vN17Pyi)ClpqBHUNU2vrdp9^QgT)7 z;RMCuieMv3I^`uR(pdAE96`ntY2!G2X>LYF6A@H61i?mQF6wgOI|O(_bISpUH=JaE zZPTX%?hHg2?2iHxWu&6|jBJLO9_lTsr}u1tsXDfT_TL+@Qgw{h~S$i^|OoZ_XE zwj)4m3rK(F_~G^=e4{^UD%$-b>-3J%zr(|UjTq^(!qgCFWEX&Iu{Urg_=8$uzPG?_ z0Jt9~3C+{mkF@-Zox-{1yCJol{wie8>GPFc&XtAjZ%RCR zx9~dR-ZW1QSl77*$xsWJ65`eGW=mx~S(<#Dl%;g}<2xdIkL?A6+A$v8?^emNa&f=r za%PXl(y?K@+|<9m^j`a#L@)o^I*;^q5sN>Uh+iI;ZJ8OeC9l}I1I7RMK-gkFS_U=^GHlgG$PpfoyU3Soo9q^1zj1d2ws2AIp@e7cibUY zUU}v3^RJ0qBczF12ZHo*#(4c%mvI->2M$vf>El}0k$Ou%vYt$g5eUVTupYOoQ%q?{r4Az6Pwplly; zzFdtXy#2va<{)5*0m38-z_>9|z#Yc{VxJT=#7&YC2@whQfo3TP@Buej&ZO-H1K} zDP6ipJZ96wanRDu8c|585$Vlv5nML-E|DIW*y$6zmcPR>@E;Ck9JXXo;Z)_bsS5_A z@WLfT1pT9^ysv?B00N!pE>T>Zxs0Y_ELVkc5)tc}Z&?RHFmUhkS*#Oqm~v!UX_1)- zQmX);;(VsA+8M#gev@ub$~C;pJ_qA<9{xZum*(v#`T!0OzOP{OwQ=?{IfM@9WFpYd zg^urVKf-_QWXshwl2h*XNTqz|?I)vFhe(C;x_9Gj(^T&F=7yYWoccJ@9eV)8H54yF zgxt+`UwmM+T7E(&nY<=fKF?_*qtmXJ?E~(U@*)3}qHY&UIg$K8v973Wf0C4i)iGI6 z1KZS>+lIH3hfeJ*$2apz#Kvj9SN(yI^;0B&+eUe2%rY|(W}ko(Htwn89NP4?h+$vNeaV*G&yRTsApcR5266n^)MHjaT@$fq?y_{(&I2F0zv9G z>3a}rni?H67@@HXbzFG01Dm?A*vBa05%He?ktM%i1c0z53IR&gcp^3ovTv3QN~gj4r&qkUf9H!_M&$fnPP=5;hSEMu^dv#zYu&pD;8| zc)}WFE)HoRvU7^cq+WD{-bYnjhKXc5MyUUN_9uu|8yovHuOz@A{WCZsn|slPi$eiK z2WcM3`h3oN`gq4WI}uSP6G^$~glAOnb+Y#suP23>i183z +JJeWpTzVvQ>hRfg| zrb7@hCF_aCpRDJ`5vKR{)R#rZ>*tbU&U+Bfls9qT=PCcu=&Z%jcAMOLd=8T&;J9_l zmxws0E}L3y($PsJCWV=f<-zz2UWQvK{h+p31-*xd@6WXc*bX=@$k18#gLQ@wQ6@`( zGBX_N{5|j(afI-kNx$|1!A!$b8j(4`Cg^#U{g<97fkK-sIx-xHE|bYH;c+YO5!=lM z+In|{B+$<*@I|m1@J3oc4xfjeK1)hQBO@ zoz9mEBI1#CbX%cP-gJ-@o$w#|aG6&&WEac4EoCxOI>>t!!{qEaUFGz#@$%mdO{GM| zws8viP}b~dnO^@id1=ym*^pIezSHEmNI9}uf<%VtZ#V}6q35|=?YA+DGUU7Yo1~PK z#63h&X@zXqUL;@5+azN*M@V{$)1hx*PFA8WJ!9S3WIP-FN>0GU6qJhsTd~J8u3UCGlWFS zH;qb8e_e%Uoc%lXZAKiITZ05oF&^WSU$D^w(&?y&JM9(Qux!gh8zt};xM*UU8XmTGfdR-QjZmQxM4}iWMxqbQd6!9jTX7RLe;#c9Ix-Sm6KS%QOvrf1vq+<$hN zZtppfLAQvU@{&EL7pe_GGc-o0!QGqnoD+q{X+kd;qT+ntbQ!0iYMWtF+3f-u(jiqo zp0QpQZpe~|qAm8l*a6rw*g$%1dG)(}r+9!2`zA^j#vdh%!aB&EKNrZIna9g-LqC-@ zXOEQ)$G$8ledCeO=VnOL#25+HFc|YwRa>=M|^8C0Ja?hv*vQC9IK2g3|4|WdC^a2svK-LsROLk$I%vzf*rBx<_ z9Z^tHE}zcaAS3=gQEvHouKe}A*>dH(bLH2y1ybhmIX+ytaPd1lH8H~Lt+(DXwb6_$zt+@CyCb~*99$sOVL$ZEBI(|=o)nf;NO5_EoYSd}98oe; zVkRp&k`4{|F{XquSa{u;Q;Fx85mx@jI%8~T76R$f`~}fVvkd0Snlx=C&Y%9NLj7VJ z(MHggs<0;IoMXPYHmj3DZ-5Gyp6 z!HB|mfmjjQ94-4yec0$b#nyzwU}|t-pbb;QhAybg$u_r+9y2wvXTPJy!L7P4x_w7b zV~j!0j8(yq@(0J9qK3l{O6C|C7-rP)8RLuy$bp4B!mDO6Y57)pY5WQ~zH_RyX`n`R zRY1cd8X-}B5iH9i2#U+S^6AntX%ZhI*LAPr7%!qqbKbLtD#kj-tcyJx;dMBH$ScDM@XssA;p+{!_ROw8E~Y|(~E_Ab)16O27HHPj*(G7 z>OW;#e7DR*cYp|_X1uGu&aE(%Gwl<)yof+MHuQ)Q;4~19P!*v6nkdHSenMiK$ z$Tf1_wwEMiqpw>zHP`J<5N~pONpCb_lumqvBvnV(eb`{9`ODROeR zhO%9S?@YC?Nd3-N5&Pq!40FryJ0=FrX3y{4REoYT#3H<%CGtl2;<<>9%e^uKiA3$#(~~wQy68T=(8A zS-Wk=0K{qC)8vlh+gE)QWdO{6R(kb#y?X2(2?_BSr-2I>2RlMl?B4JDd7puXWT%{R ziiv37e*5im{q@(IdhClYzF4lk_F6gXth1zEy?VY^A9z3zqehJ~`{(ZvaA8qaUMU|< zUoVBF<#I~5MzS`mP?m4Wm&VCSk{^*IsiAoimOfioN)^(GPa?HMs}Z#{H^bD>0P7C86u}SsaF7>=Z_K&&n;g+#(SXdY`)Z17z~Bj?*BFj?Rc# z)@LGXL}t0B4kXA*n(~5m_##^22t*wMiZt@TI^JD@C^2=`loatZ>wb~)4-M2PCBj)t z<1Dz4RC0tLDa<&<;*>9_rDirW8BaK8c@Qb5mYXqRt(7xKQz!Fl`DlB;102_+J2U&5 z>?||>QqO6=zK(J%wrd)9DSvWIdLhd*X>Fo-qA#_UN8~ 
zU)TR89?l2rjA=l2qVlkQCvZ036OmaO(s2%`oyM6!HUR16d~Y-<<9uNkUOyZE|(t76Xn+xYRmWmp?Y5LCeo#GJsHqC zNxG!fmFa7?OM%*1t?B?pLAt)jO1soJX;d#tqE$Q->0YP8e)U$r{Zx=)*r`#x^lFhL zk<4sVgy{8AuGdjfLI>T~e)3z|rSkafFj=v+&@@N8`lwdYM7LMnTawJH&t`6vU-URi z)h6^uUP-xhS1z;Lt=@*q^%zKd$MHOGW42`EmPnn5Fo`i)UI%#HaN%MvQ6rW-p3SaZ zyPC0G{LGAOYO^aVE2Vz@`VtxGmy+e8*276{J*sW8Y|bq5_aZX(11pq4E{$q|7H(ITEPjHtUVQsSh>tW@8} z$PLouNGqdB8D4Cn&k!xfRF)b$5GTfmi8edXpYejzfQHW}B0<WXt$3j336GFcPIu zKMR3O)R6rqVu4{r*H*f@5;-4mvpr7g!HJF#El0!?Ng$Ge7=k0>)!1=Bl$zRW1PqPa zXmG>6v#%4Mu-(i9qaQ8~DPS0FSL2JG22S*0w=hw{Tr~joelnehs|MO85WQxESFvuQ zN+9}(Fv3`+4%~%{KLf&rs3}=Rb98+qGKkjdRyboABp} zKLJkA#=S*OQW1NmZtn!;A%ySDx9l1Y55g^Q0Y4LwhMy4f)c#V^1SycxOoW#5D(YCN z)h5ddL5L`aqllUVID)ihgyYh0Z6q8XLCjN9#lGXLX8tpd10v$oGm}*iTi-Z^L(2=y zT2Sl2cfk4LcsRF2tH}~TRChW)_0GiO{ssJa7oe~#iqSDRl*O9^PQlx#uI2GF!re8#Eoe*-01^?T(2I`+bC$fu6^Z~^2&sjCR<Ikp>0XLHU=YTZurk0kKN@U44Nh@9^ z3B~KBebWZIz(+}V{zj>rzsiyoA``(%_iacJ(p>hPP2q+JiB^-^MhzQvO#Z-=O0V_Xn{MtswBj0nYc-4E)x5wJv398ljz_fR6lKR;q4 z+58=XCk35|0_04c-G-^wdp`R;E8>}LO_~~p03LSs9S=I`dysLI3K9$`B8{Z;Vqh{K z9D|AaKh6cO#cPh?)$1?1>#BHgC@`*!Ixrjr7!+)azhPvt&w-8H_7mJlcFlowzlqDX zN=|W^aadto?G{7|$m>DGb6Hz)y21%G;5v{%u+G)>-1P781NRkEAP}WhO;9B|jrGqxNQM=W zXrkNn17UkK@LW00(36>C&3RbxxfQ`UuFNQ>#=0;1T6l`>!4(K~nuo%bWH{lh;Cxa_ zM1-8)CX|d&-;L9)ZX^48L@9iQpocTKZ{Z~z&2VCB7b!{YA)e@ZMwIPK?0qC#kIxe- z9>#Ctpkbe>ktUM{PNv41Yln1t&J_`SzAKKinXXM_lkvU5le6Bo=Yq^MoE&s+r!gFp zMG(U{#(5pi2eQw|0wBANAON-r&jYSm92nRd4p0dGbo+~6BEZoRq0%xrR(do| zkO8feWn|YzMyy9x)v+4@wvWgrlI?#iJU^^sikzt1kBh3Ftx%}P^4!=J^3~i;QsGn7 zoC7i?iO{#!^Ak+eyKr%^VBfB|KCEBAUS4?N1xZg&ms@YW)y%bKAtq&f%9JTacvJJ~ zqV|K1l%xiQdMc$`QU)2?*SRG^ikv6n*%|LI^r7Q#i$_N&c6W~ z9&5xc1_>T;3>1H0>}aW@P+JWP*JF><^915b#6pv2}Dru!$-Vh2U5rWhV-a z@c?nt_zcfD>3qo@MEYE-?0V!M#uUaFpGE3A=LchgPPAWKZRf7yK)|E!x^xd26qi*R zgDPGPk}5~CG3LnZz%fI_ry7XWm*U-DeKfFTMPb#14xmc^`;exIGb@$Y@VmvRPpI#1i%M4uW`7+Wn^5@4+J>9}dpQphb&K08DG$cJ*zcVs*XgtbyPiOg||FkLle2-)o zP_o6t0sM~mW(+9b9pHOKz~dyR7GF`8)<qQF!)Q=5vf9(d; zpwC>JwNoAm4c1m=78v`}g^Pn3H6pTUmPBee&CgCg`DE$Wub&JUFhDN2-~##E-~J{G z7A%lWn{=^sQLB+tSSIiMw${|!ROV$!qZJ=W)atPknzdX)vQ|pyhG`PFd5T13Ew)}_ zj0?Q+7zF%`Awnu0BQ4~}WB^Z2ot;z+QIIgpm6F< ziBh_7@n>NDBpS$Qq5$M4b&=7l%1z190R<6H#&G$A`-uJ^4VAm98pVc-XjVy0Fzf!3 zuT^OO-HJT2m(2Jmzq_nJ#UZ?7guL%FoHD8=z3ui8YDB6Ldk9Vb4$qMBh3G@92LdsQ zn5MJ}$1NpEj0l_dU)%U=-ESguKeSw$+F3%jD_@(nri#b8-=OEpWT4S7G+=eY!Hj^V z`I+NbanODElso--UC?2F@c{~fx|>2sy9(>b!{q>tpy6BJirmoJCh|myILPTTPKxF2X z2>s56b!=efVZV5Ocezc)E~(^IcGT_zAkv4lZy^6Z=Lq@uul?k`EBnYlPUs+=8`Z5k zm-&H?>brUu$K}>!yExcUBO*H^Bf~U5Yu&oFg!naZBJH$SuU_V7I<>l}y`Wjgvp+49 zr^YNb-tUy+wGzE*v~o%S0pzEPH>I;BLP*|3U6s5bBcjU~3lO6}XoALAjV@=|F&byR zZ3P4!#SxR#&XLYd8UFrmHKp+o!xBa{6m_92X?b z2s7%rso_K5I|7O5_DRo+9CNSrpi@(~Ln@<^Hx33KME^fL-wJjY2R%y4D~;nSRz-G| zLDYnd5JYx0fyj;y^+N;1R*hZE_ z9U*T{&ouRw2NXC0sDr+IjF_(CZARGfUG&vp?XC*Q>Of3W_iWZ`Jx(IYWHd2$lE3fd z1lHf32)`p3_?PG`_ZHmXjB$ipNNXN;zugY$&onsP)g%FWpm4qs`IJq~ecwh|*(ajI zWK|I%CdwL}Xey;@oo0dC`EHo15eOp3j$<8e<~-r7DAePE+u=smC>ovyJ1`vXG=L)t z&Uaw4>Xh3Td~C%t-y6;n_?4`)U-Y%}Kee{Y)}{KpayyRkGvBb2KgdRK_}dSaQAzUg z>L@wp(@J@KYO&OhiCf5>s(U*(+6 zvGU}ZUFF4bD@^Tmxu~6h)3ORmO~Z~mMzlAPH?s)tWI!lZ{3Mk$|AN6CW5NNi zCqnBVfrTV_J*ixHm`S&u_@teC?YxeZaca*Hu{4dr2*Pl{kZG)foetpCX7ONT*imX^ zkjhOB7B!@#nSFSk73)NdAyyit;qB)0AWMWW22STQRVY8D+w5kE7cwV8fpFfT0`Wx`fQ>W)P|3_#~vdiMM%vXqoVnWglNAHs zImbAl5ezkt{!tPa2PL?la*N8USHEUNwqD13AhPMdgWa|{->b%k@5$W(;~t}*Qi#Cv z3Ks_ktOZ;L2s}hoZCVRd@NFLPsx0jJuxx8~f~@QKR~ggkad~286Iqv2Ry*n{S%aG@ ze;}^U{6CSlDzwR7qFjf2Yj@W<*(5j=sKLcSOyn7%3*assvw`;~e8T7QeeoKymuP;5 zlbP%%I0Iq*^P?gkT_p1U*;dFS5b4*EvDl7*+@UXmo=do~#SlAB+T5!uNXe=S1R_*F 
zH#D`Qi5k{E=suB(4S(VYcYwI&{Bo{v9Q3(bAJ^Da`_5-rJAi-5pdbT|=Ak$o9B|&@ z?_j)2MjAqz_mM*Gj}>FAXvel7HT~zuL_XE?uis5&z2@Sj6pelM!K-@sM|!& z>e*QSbxKEhB43Cj~FpR5)y($3l9L?#%DW% zQC$<4Z;{s~u9ju#dD5dt@VkfKd$9w~YJPQw)<2F~AU;>K&(SHv~KyvL=MlxgV3 z{*uy8nj6P}z$X&#)N28n*&x74K@02%O;mEsKP;(3_84mzd;XaMj4PvA8qX8?AX?8k z3*t@R*Kix3)eN9`>h6A8oFToMCz_~4l|bKt)syUXjuBW5V5ltK%6;jP0gXc?yh?_P z?kQO)M2v_a`5Ul@DGq4kGsHESEX#keeH&O?i2flY_QQHm6>KW-$o@H2_VYh{z?_A^mA#_PdvWtGOzM`Sz=pRlyHo!Pd-|bLkpxLy1tae zH<$HkC&&l+N6NP=O6B-24dv_((GtDj1M%PxPoJaTyKch)wh7k}4eRt8312@&nhZQ& zFN-8ovrYXp4Q5FJkBtnI@URf+npRh?7}Qcu=+ekEG6S1)isg-8R?AakmdOXx*2}yN zImU_AEGcHcrjG9`u+RNkCCP}+4P|Ki6gjtd6T^*rx?Yee_U>rT!ft1wV@&9ZKdJTpivVE%TJe(A{Bg!vIbqHsPp-f9oPvTq3G6YQ>|52!aCVkeMg%UIPbAWOMJ66+^}kjTD(~ zK#D64N7gGMsQXem?IF09iKxxqR45BK7f2m7W{+x?Kt`BtksdYm_tezTrDTB&W0&FC|1N z9mRu_3;{{<;KI-BcSji&&*40CnwmB5Z^fYz%X;5aAI$#1m6SUnKuQ0GqY#O35e{D( z&%v*zw%O8zu0JNAVhcjCayWnEdEOjHrbC@*w4v!;7A zPmp6fG?1Yk8%Vnbalg;NCJXAdNvq_gpI4fuVMJyT-Euul~mp8mQecs_%forXM>01*E)QrlIe z53%AO-FEIrNI@ev-$EUHGjIAVcFDUF-lbnycVoRX|NG>pUn2UzEbz^)kg$8!f5bQ->( zZ!@`XWJkGoWCywLq>ggO@$Jl9-dZz;hkFU}#=Pl30R&Hw`NV5>fc{Q@5hBik@5x*u zxn*IpG-ZU8D77nA#2@U{sF6TO!r#uSZ(KM4u9PrgfeGi#mjKDrpk41Vu5Ysq55ZklWUZ7T*9xF6)gJS{xR!HL}Q>}#_U(YLaozgXw(PeNtSsg^C z7b$lyQ5yj+C!PG5dqmouU{muMpV{|XspPzOIn$mm&Kph&vh;MF4Jpl&O~n!N-28Bv z6hBh#-_%QX!{@uwP z;50GEtTsiR6j{8jQvNe;h5X+ytL4kt8)fvu&9WptS3*NVOv*SBZGWc-Wk#RQRC^%+ z>AA(SV3S`K-9d!X@(P){Dog(U**v*t)I#~k7YpRsAD79>E%`=xR~L*0Bb`1FOvyRm z4H#VSApa1-)9Dn@rwQ5GTi$6dF`x#x_=z7V5 z#L`Zq4G4NkM!tNrc)Ls~YAnxW_L6_4_ZE*D3(>RwPa-!>wPZNweO=*{B=z@$AU>jijNUrpd@-{)5$qUTc$b+^?SRM;hAXMEkRoY? zh<(PZ@VRs|rGq*~4nmpNBamt67KpA&M_uOjPq}@#>kk{V>9cX@@q;5 zYX|gD4magsY7dZsMr6P1c`CpgD_5#5fTO?tNXwguY37U5z^vC*mQ$C0FZp4KlG8$; zL$og)Z8PKe%r(DA=#TfxX{|%Vqc+Rtyb8%K@kow}HfQB5tEe$7E=9_xqG*PGem zMDDALI+0=e(0eNY)VJ3S(p1ici<(4@vLC6}i0$Uhn@iKCO(iig(QSBEi?LBf_HV1Q zq)tSb)Qt)^k+!=(pD$~(3gn8e;qt-pp>k>3CVA%Adh+zK@zN+MUOXo}VQZ&|CPR3u z#;PFLVvQ8)%$!kUp;4nevh)KFBj!g(YJ9D;3^T z{$uBk(g^09_iWeCv(*4Wd=g=xdn{wSPW!Lz@=Zz^h73Zv?{(IbO_Ts5rSD&D${ls$ z+tn~hJ;IWp13ctJ18@NF`oL%&$A#fV)E$EeBM3)<(+QT(h3vbiSjIVk5#gT_PknX& z8?3>_YXKr1!+`UU+F4}KEh1;VDKhYHw*GGq1(u821B|Bh+!B)=Qe|@&L!U?(qIVDL zZ^@8jGE7csH4L%eBiXLVKA<4WhxKSD=y%EWu!q+~O#$7Aa~7kvwgj*NqGj&JTzO}) z(oC|huYutHYCcG0);qQV*~cnY9Vap6OK?O0DpmX~tJ71)F3*v5;VtEr(h;)!=oiE@ z;t?rsa+GW-g-b&uDM|%10vVAFUm^$*e58hVJKu5`4rQFffuN)b;Y}xWPAJX*xRv)4 z5g+lWopLew4&{^^#Z2fRdyH+twM)J+Y2qn$WArRgk$Qaa_puL(Y|-QU;%bp;ui6oppR0JTr{|NI*`~Bg`4~q$sp8XKw$66Svv8oy z{Xi-Uw#r8 zuKzxB_7T!JAxhSSHkIw&bbn~liXDVArSG-k9eAr$obiTu;~Gn8L_Ntm;Z^z4GgPLn zFOY$)>zm#jum7@2#w^N^*Cwr&v;RBA2CKtakP44?@q5S24Gv(J6TTPaT zb8j*Ft8a@$>D?mXpzNak!=iLqzkSCHks=k>AJ5nz>$80JV>OTx7b$li@0(2H2WELU zORVGj&4r8o1CMu4@-YrEEL*nBq__X{(@zo=wc~<%sDp2RV|J0O*pg>FrZ;~$$D}D; zKA^cwUbRg!vJ2(M^Xf_1SJ#LK&pDtjY@iZ8>C>cB({u#UIYosn!1K&V5^ABSfrju% zA5Uy;$q`R35(APjm!JM{s)}O_nJg8&UL{_WR;303M!^qCv=CM&6{Kk@ya8T)A~|fE z)JbZxNTnlH3}Xg^Y4BaT;;J!4`Wc1^sd~=k&OS0Xm1s2v3sG)9cfM{z)F8sxBKi&4 zQ~Mn_HyeiI{iNotQbUY-@pV&mdwOozC;q-xiI&=MQr|Fq5Y^Ogv%gN)W`qoNuY5Mm zUpTjp=stj;%%gYj-e%@~Ow698D7)Ah1tsNj!D~~cPs>CZ+)l6YssN+$$NTmEXk@R; zJ;2r?uRJxaL>?FrC4-xW`JUYkaBeY1=+bY4r#>ELVC*E$;td-%NS!+S(O5&|{jBTh zjMx1Hfvzt&0FSxP*4+lq{y&VslAf7gEK4@$8YeOQHKbr4ITBsha@boZT8FVe z;1WbMQEPgO(E9`diG!Ef+4{7mar7?K~X9mZG04GHr2Q`jtqSz-qY18B3 zNT!2OJ_9Y22}Z3oqB)_dwG*<|*ggh0v`-;>Zi;vpelB@^Z_LS9fw65QD{!XkDVZ`mW{8AOd3uI`^ zSh=d%W=UE9y?%8elHK_-$&73&kNvntF74Y$epr|xoAN5;2o>x9o3d6OI;E4`J8FTH zRruK#igu}Sa@+8Ba@GH4_@4O#M6)-k-IlF3+Xd%Enly`5EAg6XulQw$&RV9Oj z{a&W;qd6F<)GhV;;Mxsz0#hr!MqjgVQ;w9BRZ8c^bD_&*=c%Cj_ue$1$dOXom{xsU#OABX9pV&=f-C& 
z&j0#mu{`tRa_QEjo{H^!Lndt+#7c8DQtmmrzSR5SI`JS99bg0?psD$y?g>MMRB(hS zgatWLw~Qe|-4xBwi00A|hK|JuOVYIP!Xt)hkcNk!?%b)JY=aq^haj3W7c1diXveKE z^AqnipGnlbNiSQcxIx7&-Bsxn>qOm1Pv>vcHe=Kvv`sS^9}u`SU8DKTqA&FG^u45& z6G0~3Zsj;@Y;g=Uu`!*(ea&fj4g(kAF-6H6k2}QvgNk}S6Qf{>3X^m_R?^Zje%PKd zfRgNX`Q3~GQz7GPxMG^6u8Sd+S6B+^C2U-;ph;euVB znO7_?j9(#-k6tQMR&A4CR&13Y7iCD(#26E0bi@YZj=q?^Npgz4vM#iVB(~`zq2Uoy z8rxVrgKm-VR9%CAyjM3GkhR1*O^Ff0jYmkhc++}{x6>IClDR;dZT?P1G%1sY4I0Y4 ztPttlGT8_(nl?pN7EAb=iQ+-LBaBHUMg(_0&8BuE4&gLPlIRJNs^9b0<$v%Pe88AW zI*OC=1wWBSP182|j8L~s_7prfQ+WlENnBp+cO1~1$8>!%MDUocNrNQZSQmg}l zZBPm}=vFBWts~p>3#6p$MH1mHm85aEN=SKub!t;G<`3SZ_m*eXcqvZnCnb^frIiZG zi~2Q_{;lfE;>|hIAU@iNYQAUcrxA&r8pX@!dVMY4QY>RL>PUL>Kv^2!Up`(|AuSu! zlkJ6NGIvv+e7|U`DH%GdZL$=XSC~d;6PA%0zaw&s%JuV-#sqziZ=q(op&3&l-o3?*8kFOu6RWS@Oxu4f2z2Ebn7GrOGMY(&Wgd^^Eh)71%EJH)`CXXAxeqWQok5KVQnq%A}~MNOE&? zrLwZp_X-yW3e*<;xM+*~riKY#_u1>R<=kFP43Ut6G(i_ruL@`nLNE~2KjT@3kRU*! zvy7^O-0*FZK23uUx@A+(Ok*@2L~>gznbI5$!;uCi)Jh>*2i;~32?V@>F1m<&j0waz z1nd|eW`^aXD&Pm$2=_7-w*HZ3z%@og6vRSc_j2}|Ml+(gO1{LRg109t;RLxWLa>HyEw4GIXCn^k+W1p zo%)I`Bcc3e4;$efDyUEno4R_N5!IxaI>^i`F=Ko(^Z#(HmZ+csrSRUe9QpgK1bL+L zVtKC4O=h;Y;fY{TP3}K32+$kXoK9$hb6C`2!zvbj@Z%Vi5 zt$)}yPEEDR+*0swr&EmRGvW@R$V3Xqd5iOy8eSTCkqS=JGK6Pad0zJu2L$Q* zMDLy2W_UDnxjhFsodD-=j{eST8QsMQFU~vXLwVbqyIHdIB@<(gl~t9g5<2e#@i2;W z(N{K$fGjqr6pQE!+MvP>!0>uAA-9NCmp zWM(iU7#$5q)7WBfxNIx-7{T}J%55s9qfN;IeG53eujX!&L9OenP!8XrxtRkZxx1>N zr|vi@Hd40g^}Tp=uJ2ikO)9oOn6}PjP5l|R7nI7gKQ7!jC%?)HsYhg zjWeJ&^@VV8c%eqGysWG&dHU(6<-rFZl!qUF*reMnU%p&E`Q#JXx^-vIos0bo<_XSP zw_WmdVNI(SElrgKNEvV6FxHTQU1;@%yNs^-2mrxo>dojVexXS3EA78s&$fbLUvZUDwh73_Uy!e4Ym!) zb^^6qM2Y}JL#-E11U%&3E>HugzjYumqZHB4`N3%7eDE`bjgtWoWuU2=ncvyS9xG|~ zYbM#w<}{02HO@LM90!7nLl&#H78uWeLY-hK1}K6RT!1_J??rRV(dJZYyyvjA+8nO|zUL$5CgliyZnnP_Lw zz87!@5on?#U@41Eks<9;j4*!d z*EJ^M+NWib4DXm?BGBViY|mYvV+1cElxQ~BSl1@?j5wu%*Zhq+CPQ%EhFp^!*d;CA zY_CI7n6!?_mH}}Ya$V0l(yMWdX`trd?4C`eL0qJ6Z^uY3LiEBu%~UvgBuj;IplL+# zBtx(J!1QyP*={c^HR2kF6VY&{$OHn>jYEO$@<6oDS?6;U`Jtq&!p!L2ThROG_7Uym z@A|lQaBI2Ym^O0Hi5;Y8^TeRe2Ny05Ce*0N{`ljMW#Phwa_Oa)nrJqW?8wMSGtMg` zW9JkY7yB8yfYy(VFi~q#p@?WtTCr8uY%P?Pnfb;LS&|zrUS@{|f+(<=>8zxWz$En> z0o|aZ{;Q$@vV;sFdMCondO24`rQ?MMLTfgzVr)n2VZJB)zyjT3 zpiR$fqkl&jJ79a%eFtjFax6Ei&LHo6gzyTDy;kY(Ae9IRBbEA}7?-A->)0jzb>QwGoA=Rtg->DGX0U)?Oen z+9MmPBeIENBeb)39vS1pMQs4FL+V09MB_NQs%L^cdQ_A= zb99(o*DFrGnzKP(SrjG}WJRH39KaJapfpZYALu{gzlJ+d;a9(Mo5W`=(w}u*hYxd< z3n@Rs@qEnPmJ4t&6Saj;0zorPgeAMMb*cS)MorP|laZM8`v~?vIM#o^9VqA2kMc^I z>QthlwPuu>14K5xScoP&4thA0GBa!nv%h}(jMHyd&Lv((Sr<`#dY<68hF|qF>SWHA z)`hbrCRW9||NG%x#)nboPQ+Ya>)p0Wiu>Lq1>LWd)cWuB~(GfDTYePA!M-zRC z*Eno)i_6SCn*DVqZ3(xiqX7B^UQ?X8H^8-4P7ARsS}! 
zTO%V*Nmma9>DTpDocjxQ#PzEaSIQ0V&z74%nqwRWjJgV@VG*__1>EQHv2oVeC8n{PWLp{`u$2;K74+ zfwvz;xAEfQV)JlufT4@%F&$Ej_cbmm%y?!m=+o3JIiT=_72z_(dyzL=|x1s9pi%9!bDe7K3bR8qI9@%%%hE>-~Y)rOp~-ikdf~ z3p_{_Cwk6ja4bX_I548z)U0w|hC|pBS_A`$&-4e;2gH1`zPGj+ zKrZ$<)@;j{BsGkxW$Y43Hc{oB4V~%$k-ag`j)kcjbRTgl;WTsMq81`t#Q@!zdpGx` z6Z^o3qmB*ZB_;lk$TZKL-o25$I4oLjEP7E+oqwI2Fz-e=a^mH3d)E?~nlAEX{Do3E z{88HhoNX%4?TZX@9F)P(VH{^W$4Tlp;+Hyj><-fG;aE5vy8|8$=4j}2x_zDu0K_#>Z687^ ziX|dX1vpu6)U`XA39N+(ax%{%;>!q})K#tA*T+&v3 zB%$kn6VK*361{$+bYAn8+}Ccsym(Gmxm<-N6mjpUg)(vZRvD%up4s!Kt7zwQ=|&zE z7An14Bue|#9i6WT--}e3b7*w!CYAlZQ436ZH}%>RmuH&U++!A{%g}ZyJ8=>9%k3J( znbEEMsdY-688H@!hION*Ta!I6VG0C}2pnTX`D>js$c2kNM2(7U5ZHJjJ3Cu%ey=;q zs}_Jow79fF?melaF-AyFV&Pqqktb~%#2Q_^Ij23=|9&x|fo=*Zw4t z6P~qEXvUl{8mr+Ex=CldDuj=J%#Pq7C6Kf;3?>{0G{vDw8)BM$<~=wbh+vnhAZP#a zi1T{t)bWs$4j-+C7Vn`p+o_uaoTpP?w#L!Vk67b^^XTG`#p=w$>erbuBN?%~mq=2P zU0mUnZADcT*$S>*H(lICEd{GKwXS!c(7`lGA_B*Z+#80rmfMeOXKFJ25#cS7w3Q!9 z+{$kxEPs=PmA8F_67NGMhCg?hYhlR{aUGD z92VJGm`y}m{Zq>kcRYu4cAav^mseY%_}h!h9lEB&2}D`verUOaXey;lIDCy@)+1pX zeXh~hD<9yXUh=(dAj@EZ`Jyq!a!g(SBO|v;15lk`ZcAm(|FNpM18-ffnIFu2b2xg9z^GTH7G=;3R z&C+-sj(#61BkRhH%C<6bb-u^TQuY7<|MW>jK~#8{vppE6x7rY#a~hMCq_4q)Ja)oh3glOqap!k|n>m%uHSR*SCx1h$abgUazKR-07lCxn|ty zrtQVjw`GzvPh#KH{Me9VQo2diCUU%JbFQq}oFNhEdTjm~ecnn5*}h6r3YJOBL^3l% zO=;1TmD|iVUYoQ^9{75Z5yuE-dJZ5;`?X4vr_SmsXB^qsH2FKVdn5VZ<-LtV>F=M; zlN0_uNk+}yr~*Ip_Zn@)H?`OA{ou2@4a%~dFRQCFTN;a$BvaPTeir3 z_uVIHX})yZ1B}CUgclL5by;>B!h3;_oSSE9F!07_EPjKP6Gg%pYQI-_Xt!ebEd zH$>_ADs1^1Vigi4TK$7=2jWGj)1jQ&WuB)l3nswBf3&_PSH%Ez&Ir|GRe+Dv2SyR8 zeHbzbYvxwA8m9ZR&~|7=K;RuG-43Bl2U8;J7&m5YmmlzUcRk0FIgSOo#j-D?d((CH zo4@&9=m$LJXS^=*>7{nu7sS4Lf(q`IR-AvOhFO_kRtMhr;~uu&_yY)cgx9_YD{bFL ze=^-piE>mK%z-5%f+l}^fy_d$+&!^a-aa8(dZbn{U7f~E)GgjL*Z0KXivu>o%SDxd za6m{v35hWgDne9z1%le&*LD5(i*hPv!%5bm&F}CVQQ^~HHQ8JhZH^O9a$5=62hKWw?DYsQ zJ(n(`YFS|mM3R#Y1z8Lfhx^l=m64$?LI+8)tzbKbKO zv2LR889z{FcGT_0k@9~ktP%5p;QF3;`w)|P=%A>y+%*29YyunZA-#6LUyyEQMxUUz z-N!T58M_s>NxH#MpgoXAU z5Z3+r^^?E<{qGVT9WDR-=RalDs#S8u6<0{ZhE<=tiorj1>Qp1xCrp@NqT#`?dGltI zqW;}?-^mX@{2&V!EO67(cL4-8jbXl@zu9!oB%)0tm_bb<PA@N_TXBfeq+}j)TF)A%~ozQZ-V_B-t;8 zb+_PpBO-(GyqBmKrS@p3-fFo!8hw8X?s2-{2dhDK;i47+qn}h+X5sF#n=tD!eMEvF zE~LE0kOM#7mQjw_1S>ct7hnB6iYK*5OLe5*)CMm_#fC(#v;s&%gz| zo=7IyRn%_(qI`st7Ve`y84+m8vw{E)VbaEmu_Ebn?0&FM6P~bKWEzdWVL7izZKaI) zQW>2fUeecVBfRC-Sse%>{ZR?lflZ41v1$|0F&)88_Scwu?AXt5Z?&_+u_>5D!Z{_n z&+#)6q{B%P(rU1zj1`SyM|4g#!kW6_sjIW(vzZ%>Ej4drwwawB9~CCav5~g6IRIrLF;Z5if$qbO zSr#TkfzmQxD*Ucynm1E%OK!)~hq~W!#c2=6M`Xq`HR(xgU~gs-}WE zQs2|iG`@{d@x1F&CXSWJHRXh^z;@xHYS;x6?HkbKjG3Y@zx=X1_uO-4p5pV*KW`#r zPL1~NfTxsh=Xvwy$)}%wYQ#AIKKkgRrpa1)d5{JCFa`oU zzqs6VnJg%&kRHtv3~{8yhRa{t6-vvspGa6tT_vbNDwx|#s0vokiZS9L?Tsij;?U{v zNpzSIR795XEHjb|5y}V>YpBp1qKCmX1tI3E26`ss3MN3 z@vhgz8YB)da@Y<{Q7|Cr#JXAy4r;a##~2tG94mgbbF29*XFe(2%!wY5ipIDU8kk{t z;2|e+(Cu7Xua&jhIsgL62J+*zb96g685o~~C!PIB?w}XEivB=w9(bO}G!6iq5zPL^ zL9i<^Owe~_;24^o_Qs=`~$Vn*P>8I%?|*t*OL8hvSJlBdlpk$~nhzPWAwfa>R9~Q!PJZpKO|?=eN+> zA=IjGm?0s*J|j_E7f48suWlX3C0TKZ>tJiEu6eAL-3q5lpZaCeDE<#Y%xF^tXS1Xj zQ<^|yveUXZGEr?rF5|nfr&=c0F=^xeW&~;Ar6JL>E^VZgG}iCm5!%>K9rQcHVdJZ} z@0=EIqUip0^pqN1byQ1Jlf5etk!DOCAbP!STY+BJ^>+~3aVnBe?Ve`#*@X)iHHR7% z*>mR1G3nt%uMx!vW}YJ&O_aGR;&sJ4 zA%%(*Z_=QtDW3d-$mY2=;*9vj!_16Hnv_BCka`mpkdR{fJS%X`%*)x9#KQejD|s&h zm&Rat{V^7ZhSLOQnhJ25h2i1H2tgo2lz{|{U?#84>ls1(OUqbB@(`wbZH%K4Y`sL5DfJLj@QK@0Xosr@%7IPq@BR^h9Qc< zybq8ZgUH@7ZSYUQb&seBY^vnK#lZ+BP4xMzm@$k#f#XHA4!&FB8n-v8vUMWDj2N=s zr65@!IK8+h9f4G;f{xKz2zUoLec>C#9|8(c8bx|IoJW+KG9`G7sHW4%jPfO%$TQ#8 z_tmwc%i$on10F$?QpyC6(r6SQFiBB|FX0)u4FRrVwY<%7QWhGkN8+dUY7KOsSli%A 
z&Woc+;mEefSto&0hK3!1U4e}OXJdCDvf)>1x#xdkdDwIm*Y9olXytu92Lu7kl!oK>x>?aWkjb`(;$rov!@)9CfOyG^4Q`eSx_ih@rYq$#+F7_Ql?^pPK-sgDo$tUHdmtK-D zzx?v|H14mz{#wS5A8(@4{CmtX$LzA^lXCHyGiS<-88b}HH1%*znlv%nLO2f@;%lts z12sW)>((`O)%^YY@4uJS)KnQTV1R^&|FM`jrieN`@Gg8ZO1W@D5>xdQ;^=3p?u?lTE!xCNaPXmKkJD+YN4^HHOhHEsDA

zJ0tK_oFn#|4YUyfqR|bN%sB?4@{QDxAr-AjZ+i^vGv|W{0DeJ%zB#{RI3dt!!a}M$ zb#Ux2!k-7n%sC>Jjd3HKUk9Ymlh%%bwI5NbMhN!=5AF%-Pv3s~ZTah8|GLAy<6_V8 z{esQ1UX7vQYIIc@7?m{9BkHveusWyG%*sBeeHDd1#y{66MlWfbhY}YC+{NcJPZ};nWYT~X z;ZKdPa#LxYhI%VA)@kqJD%cO$P0l@0SXgMX3-7q&jy=04%)X8n4;)B&7q#Zpmg6iy z9CM6(=R~hLk2wCFYXQC|Q;s!~`go@dfOCf91Y0G2uKf(YTYjgBBwUSSpSo>RuBNsH zf8#r59yd+YlGV1D`L=}U>tnK)ON(YrrCZhy(xc#K>CkEqYMlMMZsO_2h@e|I0-o0qea2WK zdQFNRf*0>TB6z~%R^Ss+9(ups*EN%@(ZcH%d@8bPqVA?Inumeoq@Dfx>>vBc^+H`2 z5ozk*s2f9klcq+ObQ+~V%FGfbm91fSkuUyY4J^ki4!NK6t~Ce{@XS307yV?7U{et3$e^}#}ZvKZ6R;*ZI?g=ul08|MlxsA{EBUq0qPg?x7 zegP`P;Gp9kvFq^OtdW>Kj&qayz;9bH7~J^Nq73tr0caa%1+nq$`Oyr zoT3KO(z{mbZk;V21RD<4I?7eczPDV#d~BL`PJG&OK%FF!x=pPEm(TBdj+B=c>w4c_ zLhGi9=hx@#`cF;qhUs?y;H^Qo+S+6Ij^m#BmPmgU&vYS=)@{ONa40G2Y?ry-@C^c+ zQDZ6;bGlwGVR0!&=(fLD8-EaTC^!oIyQ_m0{8u?>tw@r75oZ1 zgFkpmXVVkF0pj>?AI&kf-q^2R{_~kvy_zS=RfAf{U0=*Mr47#WWMKUJoFj~oUcI$I zR%RBMuHi)68TGZxUIi{(xTq1-=pFarhabw#H{UG1di9d}_3KOX=FLq+nuQv}o%HW< zEb^Ml93WoQ{`7%<;PVgPL&awn;_j!Rrqf(9{LbE8fq=>3I?!8fqGP{IzSS zywEF4BFj}+6Xo?a-`D}9$B`z7LDK0=eT|Y5-tu@ge0FH^q9loshV1AF-mt64X|IZ? zVIsp+_+Idt-NqUfyLgZrb+dKT2;Ln8Hc<8;8l0zw$wwE8d~>s{xq=88@qvIIeW%E` zYFNZK6zQ(7p^g-x{r&AWQqAWQ0ieN5xe8yNV;|WEW}agFAmq*5Yad{(Eh^e^9zfir z1aeO3Ob$7-Eu!S4sr{_m$EZ5tDNEWIJs3!7q|oa=VenuyF(aGD`l%w5)u2MScT{0b z_wn;Tu{G+u8go{|=-j!p>0{vB6O?th*tb}{HD4Omt&S5J;WlNLI;oK8AqFn4p5Sje#6)P%9zlIy=4*uDdDxy?S?hO#uCgt15 zXMWJXnA+EKdNq-!&h9Ru%?C)?Wj~7iWrE1n%2^kFE+NhOOLA(OM1_Tj2S*rLQ`25E zk!iE;<3z?uOSV<(p(+?J5;LBQlxTh4d*U-v()n!hb~s7GBBI0tr;zc)7)(SU$BJ`} zGA$Z>^}E5o7aoFJ;H~x3l#BkO=jbK-IwG`WNP#o;oZ%p56lK%C66*1qDCgdS%s_hV zIQIm7aP}_KNp}i9pQ#*46rTPj96OO~9O9e<_@85@3wS}gO&9NZsda1<{XbF9_0aq5 zG?O8Et`OKv@PKC~I<yh0WN$0e>uE2KT;sBsV?>fZo>eZ`FN)aFmkdcvL#4nMu`1trgt;=SXc4A_p zN&O~r?O@HCHM<=DMaoi}Hf>A|_XQVRAQLA}+~K`|?GlNIi;FXl!zn6Jp(`>q*tx}J zrUT}r<(cxAZV__K()*?UxLd>nfsulhs=^qOcEHcnO(&@sZZpDqk+&(CO@985v~7ec zpu706f7(bm;`ghYY%~}H0K;Iq3T~pKP%}G&BcrD|CtW(LPtc~Xx>qO7dhr$ zWgQi=q*ObvC(6yJ4^p|0f5I9iN8M(<*%(07SyP)$iZ>o{BHyIo9)%ouOG^pb-BUsY&{VcQHf#}>}Sd&e2 zPWPTTfFSw{R|EpVjRUlXLG&<5#ddOhl!PZJU%@S;hI2nEN2Em-N#tS`f=+rk*rq}e z!8k)jE_?*v!5ysaa3v$YCOswIQP-I%7oj*?H_p_D@&WbM2<$+>Z8+L!21*3CrScTc zTc?iOAMh5O^ZAvwCcAM@<2;To%#aA#wXIcEa88&M<9|>1X9-ptIGW+-z~*u|3}^sK zWSQ?9z>x@Lnn5zA3+H%vq;)b(`#+Q3E#KYh%RJNd4SR(1M(-H7o78Z`G!0L&O`L|P z@IUGIy#EIk>DUR_7`zYQIHv&{S#xT4c=S;xf34J6^}TpLy;MBYUXzez-%4!h771B9 z$-c)AA?b4^v?x=qd2hCSJAbq3&P{I)2aLlaGEIkXe?*3dn9`!a6n`?1j_=aYbT^FVmgCUv-9q|@0#>((!LuvZfrcF>({S0Vt7|z@#eL7N=fl1 zEos`cX(q}xc<^9TOU-Dn?b~hilV)n^>FFkQY2(I?CUqPEjeu~W+^Y+3aao0I$hJ<8 z2wl7rqeG-!{#1$FOnRCw%y^MW(W2go)HK8>4^pa08Ko;cbFxWo!>f+>7Xgd$L8B68 zgCa^NJ!=gHQmm+h!k9p`cE3O$mssLNXfi+B0rghYDf>qj_?a{>_Kj^#dRpY`8*S5< z=0{lr2H>eiwBp6aV8IhUUIjefaL5#611~vE+zRy=ne9!=Hj!;Qp5sNQ2Ah#%)S6Ql zhM=du8j+mX+!AqM3cUlw5c^7VIF11)1xA2rxUviU+233oQb1r6T?jO!{uDl=&UDh> zM@WeBm|J*rK{c*Dj8%+Ve=*_0MNNZiKbS)k6e*V+V1#U4DxRG(uM{|2;m_&HD?h4e zL-@fjlmq#ifQDqRk;sJ~OGu?2pV347Pahm}FqEt4G?`Q?u;D}Mxe?|5*;jy$*<_W$ zQ3;@sgJne>dU+a_ZUfo|%+x7cmd zUnQ}ngdpaf@-o1Fay&s0QeGmZ<&|cR` z#v3IoE6a2lpEYZiNlWE9=47{P*G_J|RTsKHLF?A7W!SJ`rUvLA|M-VVF+ca*bItti zg$ozTi!Z)tBHLips8RCp!w<_n_uOOtC5p~#-3Ww@q%nHpzY|wU*EHMh6gueLBv#V0 zXIp~6Q%ppfv~6mwX%<3#Fez7zwu0awZPKvXs`#W%&O~B)y%HWCcZeS`4dM9%@0>N|s-Dn+Ok|0U#V%azFPQGyDwG02^pUmwKx5?u>v;8d3Ol1Le|y2EwsX_N zhj1lMX?P9rym8p-zX-&*Mm7aL*l{5OspY0Li-w*w&qNHu&3&#`jvJ)!Q+_s%a6MmZ zr|4(-8hhf*CQ?pqxBq_D7GOWHzBnD&9T265d&zd=_;B3wI>Y@7#WXsjCY|o#yqCTx z*d9cmTMgCMUux^sxmH$<7n%NldQS9w)5RQHhm1WO^*AhOyc#I<2_8@i*7HjHxu$Q7 z+8zk(Zs&?8sik%1`-5mac1Hl{UcaWKju{EpEhSXClrNN<>x`8rnoXC-hs4TX`ls!( 
z%Xh1kSh@4~_NE*7HAlCS+lIF@wcCj90j-i`aJyuQ^v&W%Z}(}DXu5~T>}Bn}3l}c- z4QjNzIGwuNw{I`~`}dbYg9aI~eAH1#nJ6|)Txu4aKZ_u9-MjIe+JUm5$K0%cX2$ZqvmXq|BPE< z=3_H6kxrG2dg!H#a^1465}DS;5*i~ds6%Fy1P#qF6!4%E&4#4#>N2W}v^Y}NAVQ+k z5X^#4tY;muNsU+_s8w#NB$uZG91%}l8QXDy;Xs{p*K@6q<$;mFXEXmgkO&c@kR&zV zi0dYO?dNY_rSDTgK$8=^*^nrNNxE3R3Kt^Nc(iGhhT%fg8P7IN*ofjo#7opD0vLKY z8`uw=0@QRNkeM0I{$LEyqy=JUKM_U#V1F-n_L|#Wd>>)*%5A1h(FddTyMyDo7EWWFWETB6#*C5MS^H86*f`L@E2Q zs~eV7W`r?h~0Bz29gEicmzr z!Bf-_XX!b?DZYK3ZOX-c2`^F8jpNwiO2mAEK96%5F5Aak!aecC6Hn}UcX4W>^U}q` zd2l+T12`Dr%rO~pt;a*T8IEq8`PeGUM(eTa`G<=+Pi@o|pqqPh#)0X%B6WNHRPn|p zOJ(P?rGhfI*25(Pr@YC?Qz1?77<$8COZY?2lea-<@vtp8m`paNE3jWoH;L49i5=lz zMuQDRN~${KWgr=n#mHdQBr9by1)AgcZtz z@3b9>13|PmUX7*%-S*rMmGJcUnQz+9bO5Q+6?Oc-Y~-Bno%oE%w|~>mQ{yENG&N&e zh-DlA7&|mQfixjUqS$!CiM}%qgytzm*!wPfqSP2{L>ur-)2xOxF}8!}8c~gqhKLcx z94B)~G5XkVL^9{C{|#1T2Qy;W7eqGX%irVDRM>a2`v*8qjHqA!Z9VK58jv{W>(~cu zcENr|PEL+#5a@On-#2)2^iuhCWv1LQ!dK^46>vY1l|ee>KE#{zOC?e-+y4x!;`hpZ z{^9u|r@v~;C=OFlhJ*vx?Jm9_!P>}rji|&CNeK>4(S0;Geg`0$;4MTh>ok)zj=sZ+ z)L=mDeRh=Y;=7J_M6u zm_k9IVvo9u=WG=3FVE_Jsqn=?!FSBhl%&xchO9QY8Ak*S7_l`LereCsjMr^cAKvFX zBC_pd1H|ZSDHX%vpy$x5-=(+DHR4%3N<5q!J}0H4c=h}BPW`Wh%>T%@Lq6ztPq2p29~)HrH1e~50Z%%~>C zn}9hr*^fQ;m>J835$|IEAgi!Uj%t%Ee;?6ao*J`Ep89c_tl3&9n{tZG!<(_#My2tz zJ2S3{@*?<%sxr=Y88%o3wAD*$}&lK&xoovP7iMUwxnM1I7nY=fz5p z)JH)y)Ic$A#7SkNmhBY%EK;`daAQo69=7f`E64+ZNJaP)0U&yS=w1AceJ>;mIaB9N z)EaR>iWx%J5!e6$L8?3ZG}2c${@xiP-`#FUsZcYASDH?yt0&p0HnmwCH|J%QKF6_A z_q{7}qn?a3L5R7%?nh%a$Q*$UICrEIVn}ez>^q-<@x-Vwes^(DV|{j^q{LRkz~!3Y zI>w2$k5NCyBk2X!Fsl-{r->W|mVmf$QPaS|nbO`yYnhYGtmv8$*j(E$Afg#RMbveOih0tT8F7h|msIl2DvA-)M8PQ$ z!SVX#H6ov1Zux>tF`B_S91SNkqAIzao+EwGM2s<5a0XdhCp>8dXM-+wM3&Qeyf%UF zlC_1#puIGb^f<8+_~8z(f54Py(az0gZvN7r^7;Nu@r#joe4*H!j? zqSsKF-AP2?+{Oi6T}6GjK1qy7S z+x4(**86G#K^+kmB2nR?rpL~K7TEOsA(->j4@>2-(Mx2;nk>`t+=Yw76*VfdUw!pe zQxiRX`gD`>OQDMfQzIW6DnXz`enV&dQ|NUk8HYq8}m$+4G z960G^ze5dh3=SH95J~N=gf`+)B`_teaSvMI3enT8lMdksbUev)AZsgLS+txeA+^$qX-+3)Cc*NKc#o5KR1{dl2P70ligA!#Gm6DNDEXncJNN>fM#$d;Q z-^XYW9U_H#NpG)?&5hWn$&d>d2LiiVJNFgR7m0}~`>(+`p=K*DA=rXK_l2@5`gdHW zA{KEyPI-<|UT_n$9OJ-U z_p9ztkW8+XV{KHI=yu{b~;FVJ=a9}e|gRZ1h7RGsJ-!vigTLF5fQH6PGrE%*4Y4G zGn%We+6@l)e)U{?ydLq4(Cr3Y@4ULg0tGCOT z6@{{)@rhD;{%2Br#YA~K`VzV5vrUqjUn*rJ6!^Zbfi2>salt|fVLcC47teDA+w?f* z`<^&Bfx!N1&L$c5>;!pX{0e#H=aq8t8`I>iUsp>$b0b~2I1EvvBAant#~gEva-+x8 z>oAIo$Tpq7=>qOz|A4VwKQ78J&CeY0Tv(__iqN3ZM9YFuFk%*>LnxE(JX=L5!qcf2 zb3jcL zuvEg!O!-t>6_Plk=|=)bk~zlxl-kXfH+^)W$S0Rrrz%q} zXkO-sYIrU>(f%9=C(~utJ9fSB$1)ZEIB<2}ybZcYB|KQ$z%k-f$4)rnd==1V*jj8j z5eEmnSf>0>UlpR#q-;~WjRS(oBu<|b13mssJ%2n$h5*gr=q2&J+8LCIkzP-lzJE$K z4s^Z?nx3ky;87tQqOT7ndjMwzwhrS(Y5ZEhjd)equWo*h-2U|%x#qoD^3bY-4H}p+TZnAxufP5FTlwXeUrhZDGe}+RA0V!2e&&x& z*+r(KWqw7NtZ#I@cp)If98E?LojmZ&Gwy=gChEJK+OR<2p)JX`o@jt)8R0wkJ?n{{ z@urfTk{!aC`l;4K?ca$I5Y5Iwn4;pD8fXMI0-13qq`Xl_&FC;1j?mqheW31&ZLXVa z>Z(lC9IrM;1S#j#GV@>*7Y0jxB{2*d8lmAar#T88(fOT}J!-8T0uKbw5edgr4%lZR z0}vwn6r@>6wL!##F17w0R&fq+?rEsDuN(Zh*ssXQEs>J43Q4V&$kywOs7?(S#*Fb| zjOx~+YFIr))TxsU7EN>E;$R1Y3=RndT(K*G>kr-wpEzYeM3zZ;hNIvuoX2Ga${TSu zOC?mthy8-zM)_?nC{&KI4%u^n(^z?Fz(9>E+Vl(Lx_+iHVM`>!n# z<2>MGr@or?i`r)g@OOPZzXTNKePRO%PV+lb?-}=*ps!yyRVvh$$n1Eze6+QNY}j68 zMxy?-c#Di%vPDYNzG|HkCx1V#ot)IQq4e{0{=TkHJxQ4Qv=#n1Q|J%Fh)<40eF4mmG}bz5t6>`)O=wSkn(2Ipp>vMBJdw`t;40LMw!S?beC9i;giwcCSk`{Q_A z9JJtGquXn2q#B7;0rvuiCuxuS;`$1JE(H=3=8??8Y8XAt>c$Di!-b1O4Rr1%wU|!+ zq;@0fXne+V!FVAMtl`v*!=FxbF$6JXNQetWT+gd4m*Jp|)@_odM7p?th!5W4-iHr} z?y?PpGU5+mMpI5WjYw_xOH>3ZpE8vKo(crK!F!5x#DgO^ zjl&)XL)Wt{x6gdbHXv<4y8R8xy?Put$!TsD2y6AbDS)q7v1{k5g%%;6_E#TPid4+h@&hpX>mY_mauCrQo!c5J$3m1nY 
zYD{F)v>YMLTx>-4$dMz>?CJ*}d{C~t?mE+v(nZaKI+@>AZIhnO5+#O~!af)ct0WMw z3*fjFc~aE$Xz?OqspCSxG9H7vE`+m5S=RSrAdohN2y}puX0|onSxBAeD+CCkKxAyw ziGCrb@k*~y(amS@JLE#zI4SAWb0ex}ykR}k)MYsyeCB^bLd#UF2i7PD`|$y};N%SBc=JK#9jKN_PES?~wPZ3NKH zq*86Ltt2(_dR-wh@)?m+UbJ=WI0A@fx;Utjky|WvBEzaaR)uSyG({p^HQ-=jHyF#B zS)Fuo?n~}F8uGbtaj1djl+^mRJ>H5SGKdDrdfIw$7<0&tTocx^SGwxra zlw?97Aj?$DArwtHlWy1Fc?)Q=hUi0768)w}2wVn_5%oqW)A5`PFCyM>+6v{l&Sxm0 ztI&7AKKH-ge!hy=5EYc38gSU{J#aASzEWbgNV$8}&$b?Y`H#9lspt*Sg$kzw;*_Z) z*Z>oru=WE&+`q=!`3`W7n9MXk=Y<0@;b=5B_%8i^{;7go`JQco!0!TJyXbbjb(_lE z+&KARMZT0)?6__c#8{tY?E~B0Tfd%!XgQ)uJ&BKt`y&$#Cjt?6j&I!*@g_7EPrDPv z)AcNUgYI_?to8d79COcRdhZ0_*p3a%=rOnJxr@UQH72r25&ze}{$<9U0OnYC>C#1x zJn~3kmbIHMzJHKaP$EAq%8=B0QS$WJ-Q=dBt>yY5t>pfbJIa|o8_Nk@8cP3Gi4tB> zARfdmo@2bd82O~G(e-n{-zvJ0QIpE{C_n^}$pSMD-6Uj(MJQlc~JiSNR&71Dj#UJ=Q5s^qD4Sfyc{vGeLna z)`>!$HzU9}hwwWkZ2mwL;(XEfd3F1VO*`%+6bVwaPL7dfo3=?|Ntw)8n4P4~|q*hO@FCm?~i#&N)TILJ{iIzsL^zJuI6tc}!aF>*c58vG*;XK3 z8rPMa!ZI^vI;)^mR%RARNm->7mzGI~I(hPZ*EJHe{97wxcYtmTuPbS57z0GL8E5g+ zgG%=EtN@@kn%XC3Khps1m#6hZRH#!wWon?+;J~wN=1nWf@xWk*5E18e?WQ5jlJ7)5 zzDQ(@J|C)tGwUrI^~Uo{jTYW(#6R;y13?`ebyf&vCu;716JiV%KXT+bNS*XEK&l`F zk3mHgfqHIgw;^QaY%_ifqUV?(eY&UjzH;A+?BAjL$>+K_BtYF>W?qRg#;WYeH{}y* z^qCBjD!!*0V3Tp-9?2?VjF<1p-GO_I^mSKcyEqiFdZHCwIAY-fxCZAe5#(yslIqQc zLzy7A~P_13Y6-8!G|%4mXiSc zOS82JPuleGNEL0&`zC8kf3HaHAm#P+`5IVD_6bOb=Mf0b8P0lm77pfTI35snUZeKG zPye){d*I(}1{`VioDX`xkoM2Ig@7fRj58kx1nJvfU27Yua*hYyDqd^=`m;FBf4&R- zUc4PolZqCD#nbCb>D0WT+&i+PG^!URy;>y5(`R&&AFpU7mxPXy^VZxWQ4=4QsGOA& z6_oKlc4n zvkIqJ(0(cLew z1_g7pscoW(2hnJ{rIPkW>NEdhJV1Q&KC~X^jw)uisF3b@jv59^dNf!%@?tBh{cFg~ z3O>MkpsC4rI-EmVq<=9Fl+U6@8!~o)!NdDV{a*OF?@0ww!ai5)>;EUx{|0O9KzKB{ zarl5+1EM#_waM0*6CEI0agf(f?GDVb@7=q%%$_}4Vq$#8oQrB>TYjnh;qKjM~q!lE}QZz= z<>g7!rcF(DR-HO^e9yW#=z$>qK*bj+$$@}ZE>QuCsI3m*$5)hh*tS0q#wXGUOT{u{ zFcHi|VG(s?iqTCQ{=_3rvrzhhIDiA2^tB~aIK9d8BBF~xg;R-)(m<27WBXbam4ieE z-y@|Fb)~#+vl>~)^Yimf=^xE(V`J4;s4i6GdXm zPgS4>I?@rvXZ)YYkN2r9aFUog+B{dOYZe{Iu?ujPbKWxa=igqke2$%g4MJ124yQ z`1p7kHENVR`skx_|NZy>{&3RBUF;jM#IM+rFZX=8z;wJUQ3m63jx)L>Nx$NW5&}tM5TvRx;a_VFAgV5B+jO+Gzu3q!;u1mLYKU#Vf_Uz7 zmdLNqi+rJ?b+Z0TjWiKxwo4ioBfK`wvc>~m=Sfdl;==%;8424&;PN_*2%_E8W79wl z;{!lqbo`_it^bX-b2#0{Ne{!|pmq(fGz~?Z8Yqks>cy$o<`^6xylGNHWNz5~HpPzA zJ!N@j^w;dna>~7?s|f$@c3r>JhQM*j`6T0x@0Sd=-(D2&n0sVJcBQQDc&!w+JYK>U zd@51jTqhwoV7~Z^gv@+H`ZNlYF$*(f)Xde=uX2_|t(*9}zy_iD>C$z>hcd9S+AuyC zUp<~pK9?*$S75tvaoC_nMfS}%-z@+4fBzTs(XyrA7zG!5jnc9T`FZIUNze6l`1YZ! 
zqEdcal_l*{{9gm9&{%%n6aAyF4i z1es=H5E_I|N)}yMsWoFpC_tp*J*EpdscY1cQIiJY(NqocN33FW0J^Etv7CrB0$M-H zr0EekAWc2BtKC2L0mEg(RO@}F^C;V=e$ITB8XYt;<6olEG_fHakJK#E)(ye=YT$4R zm=Rt|4qq}^8s7XcTc^7*RMo8opm&xXw zVl(@?43B95a*E0%Gr!o(FieRJm&@9fN#D}nB&4!LnI*!eb<+);RB#Lq(!Yr+liH2A zCQY0=QJRK9UXV8tYKV{YZU`?D6(&Ee*6uq9<4@Q*`K?gVqNXa8joM<$4 z)ZH%@N$zAtKE?{7f#^H6cfUNV`}c_+r|t`>YkdB%|F-+@3JVu|0D2%8eahUdssJOL zre_%Zjw7lD5E*K_;AR(BLu4cTSrZTeE?gYMfH$*?N@ddWOnLj)H8Q27xl|0f&k8%Z zf=sHxcUq@&mAqp?Kzj4wyG4$B)TSnrHMVM;t;%+L^*ZYM9&>z2=5}G zaX!P3aOvVvmOF87tB^MJ$wYLS^1<_zHu2i>V)4N3i2B+H);J#UG}&+FSoPr?E7J_j z*iSt<2xh$SJ(b6g*6(KEEh3GMP&;J4Jzv-noO9DVLf^xA3Irm%9^=8zRl7&O=Zrr@ zHg-lvkw;P@%OqsR{~5u&14{EHZ0WbsA}Lxb)pRZiQClgnF{NHB<)KkhEVj{QujUDI z`GDp!cJUVZY}N)@m6>lQz8nl7K$DqYA~V-+mw6kqO%q*85?#1(QFYX)$euH2j$CoY z6*6PS40-3BccieeP%<+!<-Pablg*ozG+gWzSnjs!B1jj@52mk^jt%2Y7vn&5PK!4s zUOz6*kS;OXrP+j=Bx%k|!dMR@Od+|7a!Xb#$6F!Im@Pym73 z&fb0hD$?PkKT@Hn4a2+acG%xzflB)wgXWL5q`Vo(b9Q&Y6 zmBR?Z0OIdXnikRc#rl3g>Nl^UCVa|^Hj0i@fNj@pY6-hIO9i_UGSAI=$EK1&@T9Pt zv+4%~HltTuk?mp+fXF6IJR+=$f*He*Xm$;kBrvv=0S{GA7uOe)Nn)G;f6R$>;o@Ke zZD=-U7t3=$Etfk!n=k(!yIgMja=F~NxPxpT{D{bzZ`$tMWIeq>wQsHuYLxP+6g@$a4#dfa6*`4{nbXF`JJpeC&Qp|Z);Z|ylKKpUze#bTPHe= z->?-rpJo4s9YTaW5IFz0uN5!03t0m^d?;)-M5dRvFg+_gyU1Sis;$wYejV9T=#@z& zZKbq4K*sV-+c2;Iq9U6#1V@@1#O)6vzr)RIJ2E7 zB}V@SWd5MbDmBrh(`8e)twx8b^$c9R^>>~pt$X4VBA@@o3TF%#yw=A)Aaa@-Cd@M> zrH^PdwbXdLn+~wzoqeJ<4)Kl0f6DW=>*^;Lsi4tgq%Mv+aO&mQ??7P8xLFr2_5kH- zt!-109UT!WQDIeBYPl~k#F^#F{ZoU8(DfixZ{cdCi&Jtz6r0it7cLG`knQ#3qI7wG z>N=DCMW)wA{deq&TzO$mxfC^2F_+xdisQW%r*yjBq=Ipq@~z{jg%?Q=C+a%zW}DST zy)_PL9M~qa>RS^z-6@*NHW2BQJWYDWig%iRQJ+n%Ho_i9nB#!L4=jfS2j~B*F3A&%Lww~j%Ul*`WMT*+&VITU4>+K2^hQOLuJ?VAAmuMYR#zbboLAWUeey`;r;p>-0gdAENQug|)Qwz=exih#D2y>(;H49zA;O z&{aGlB0|c_%FI|V7kdXhibZ7=5*ivJPmNhB6P9L5gZLm(t<3(n^ zZHW5<0YVztPQpg_hmNhxtY%+axTps5OUh+SUa=&^L`Wo|%DWl#9BlSc7MXKqJn{H1mtJ8gT!S8clXp|LbhYmeX()p-F@mA&P^5 zMq-V7Ny)(5Wm{Qf?Mzc9ord|>2;)G|B#iFr^mD=1AcDPYwB>W05rHZd8wMxfe6?xD zt9T~!=43m@%+zTgd5LvI&``BWZ}DWzmx`iX*>c%bc_!j&`Nw3hOkJC+yA&$Z;)hFq zv!PNMl^|ZVIVz$OCBN4VvZS(!tlC;2FPzt1hIeZwKj$@)Np0_xp9g;~^9R2srH%XR z!Ww6?mlkfyF+Fnp_mZCx-3vD6=(=0gg?Imhy7TNpwUhiniN*H|(oGZH+Uy{3;i5L7 zMwJ82&8AJ8W=5Y7b!FVc)TvWVUG^T0dvLJ>Jo+Er(NAZtHzT^5*RLZ>Gjiqe@0Z9T z71cCCduy_-Pv-9{vdW~ao)Qee!%O`Zojd1$Vk2mzT_1Iu^^*6zN=3H{SG?lM?QIl` zS~Vio;riY2+CWf47O#c`4aUA!v5SCaE~S|dsp6e~=@^QkKvbDds5F70_R7>s>*r7- z4lwA6u9NoFu&c-*-LDak*i>+8#7R-79t+{UVTQ<052(ShKukS#bdpUgTRlN!-EYbc zYP9tJixuEBJ~M(@SprdCpybVLO++}IUx{dAq(IQAo$Y%}L$(^cRftyLsBoFlx&z9KIK1IWGPvkN z0tf!ii7L7q9ciQ9lyOl`MOr!KQ~Zv&M65D1nnr1)ogaOhgoQ^)cvUiK4g^?w8tO6P zkS8tN`CjPpLV7qtn+QKm%s7X$-%$};*PaW1@Ei@%=*~{h7Si#j{?~SHKk0dquIJgz zy$O$5TW9-f<3JBx{H26N==q6`lQ3PQ5ychC!~dIKEFU#e@qFaJWoD7fKW%EU|MQ>!$m_4aZszZ%r3LX^x~LW) z0~(&C#z)Dq9U4fZy3um|;MOv-TSG~!7cH$)Vr4+?QyvZeh+;za~Q z)HFO>q=W=P6B(wd5uR~~!SNc?@NB3O7s9*mb#_KBjYEO~V*t-H5p5c+p=fEgA>UJAh##0Sp(gk$Rdo~3ePxe?fM2KEbe9AT{Y}Bnf3xU$n2U zsgWBPZR`W}?+bk+N{H&aI3$o?TrPzr6%ubci&w#zwtFe%)>+78z^Y{{tFo!BM{ zO)BzN*Ncp~$2yY7KWsU%&tGiE@a`Ab)MXsRWRw9)u5f(AlZh&{`5c_Zblrv%Y2-q>48$a&6zBD{|C4gBHS>Zx5Dqx>7|9~TaZYJkOFhok zdMrfb>A^x1vy=|j>CW0iq#CCM$Lue(+YGlEZGmeOb^v8$D}J<1;@Aexf1H^yNm8kg z@bCyT+Vtw9TI#=z%*e%moY+Cq%VXuo{FX94a+r+EO_Tr4+AbT_-U4e?Tu)g=MvqsD zOy+}{Fcn@|u_aFmO3KZnwc1}Tl45ok-N;Yfo7R&Kjcf^DZAFh}2~symNAx}|k|Zr5 z#+ZmMT)5ahYIN1@*|VqIcH3=|l9D19TyTNJ#Kg#1XPsq!W`4Gdy#otf%j7z8cF!i# zNJaI}%eKh3^Eb<}?Hibx*r#=GWSey4l}LlQNEy;TMb^i3l=a@TfmyJN!(_I$nG}o2Dp?=peluGIqf0m?vr?>q^knvtj_@-KSm)=KxVR z(%m6o$U4dwfyZzmQb_7KP5}pazv-sVzeF4w=`q)ozy=6=Muz=xw>8E_-)X(@?#Qr1 z0;G!-0xVR>Xbk#5jtj9)pRA7sFR0$y}O1S^Iw@8I`;o=|!VzXb%B)MW> 
z3&WGA9MMRUVo=u9vr`tT81$gWDxbWLSu4 z{B`xwt>lJd+DJpSttKqnY8uDVX9JKrjgTgr5Qil=zH_Rn?SEqQ5_$By#WH2(Hgjzq z+c8DzM1)C9M5qjGQ(rD0&_ddz*p%|xik8W-@|Qs^q@~)AAVSXxjws3xu{B+|a8WhX zsK}l>cdj&V-dt|H@kY7lo_pl(yYH5>&pum{laqa~aIrTC)kQi*#q+Fn+vVdK>*e!V z8|D3J>*Y~hA{KU7DTIMY7Ww)VEr!^WijGt4ZLfnE0fPkkBhUX}4+M=9lH`MuplgFK=ijA57mUA5GsNFO6Rz z^Ec+m^fg&>@2CZ4WEK51GIEPe1F*mvekMk6EzujNXY&NpWcGhk*2;TR*XcIb%QJc% zt;;Hs8;7=$C(h_9PoCLT?m4l89NjLZYCSp*AjtMOw|7(d*J+*Q@iV%}qo;L|dq;MZ zeyx&fZ|Q*x7qt*Idbhmr!V5BBzyP`Z_SE}8XIE(?LZR0Ok5n)V8$q=9MR!n5*t+ok&QvgcTYFn{epzF z>(D}E*VehaE?iUrL=ofkzN9hXybU>~85%&-k@=O8xm7AC;|ZJz&$XX1FL<^QoIwpR z!WbS21kaJB1<&F{rmJ?E3ge^yE^^F0wui{lZ|ph%-yQ#iZARK`knOOIfOZ^kDeXmu zKV(zM*#^!p_>;)@#K%SYU8|g{T-W_VGua?6s+NjuGQ|+hfe6(#ljn&%)2oI0>Pb(k zE%B;tYRcbfUP>gKi1%c*S4gGzcisa8dW)lNr*RzUbYm~d7SDvoC1m<*a#WKD6X7Q^ z&G$|R?W5Wx8?pV`q}8SdnKFV74dZ+pwBXvIBoDibfW~Rv)8wNW>t*e>LNiMm;m!K* z^u!s_CDjBv+BS$ck@6r{N3BDY3hllvljMve8_Ow2q)Den@#eXMVc%W2s4b{b#rw)D zubA;&85tQeeE4wb)vK4h_~MIZMz-5LVb73TTqeJ)*e0{rWt%9q=~C%SVe$u!&LYD@ z%-@b|nqay@uFTArP1(i9V?U^Un#`(qoD?=cR*5^xrd*N6HtjW$U!S-2URxI0x@bgY znu=J`$B7Js#MV}DGt-i^IMTf_KoH|wROm9p5^+j|nn(akGbv%zNJEl8s{z4#kJrx$ zSHpptwMaD-NbRQUH{F#$q{0>d$0A;moCWo5o}Q zLYtqNi%qI|QVW~;MPo0@u!wpi=7&9~?>o)jTR5#*6L3P0*XK>6uC}&8Sp7C49Zs== z6UXt?S42L)N_lsY&4i%==;}$zW$%kD`A#Z|a-^i(IuU9eh)l!tG%)oCj&D-xM_s2J zJz8H!v(rL*Zb&OWsp`w*EaD;g71}g#( zyVO9@;4B!ZrOsZf0=R)CPnxpir`t4eM7M$N48{m`)kK|%K>w^lyXR%rBaOk*exyjp zQ*D$F!Q5X3I?-oR`9_EVBG8EJxhlr@CZf&p(A*`ExM<3T=iS9Y z4N?o~RGSbJZj6*a1#o*?R#=}z}O{X=89|=2RRT9 zZ5zazIrf3bE{~9shP`a0)gSc#VC^nY9$2X&_{V>Wd~=I(#b-83%6)<3x4Ci{+yz&` znbZ$=J5OZMmm+VdP#&wIk14#j+8Hq=M5? zoU*8F$z?fQq400#u6Yow2p z6@U}LG&=M9F0xizCp{t^G0yubU!&0(S#697P3~X=29$|8V5}MImoMGF?%E%ue`DJK z4rLUGrG zcu0u5Zgh9Eo^_Y+{O&sglVKA!$tKx^`?SMO-g)P>x%Zy)97QTq?}mJ9JkJyY`}ZZo zWyYV5l23m>UM?P!VaRqye7NcA5~JU1t*{(xe}l?`U%$Rku6|{{JUb&xHWpSIPq&4I z#o>?rzVgx!n||3VSFV(1C5L_b^pU}X2b<7wi#c}ow8o0ZmE~&HbhHxtqOWVys^NOw}eFF^4B1J+Nj8^__eSg7$HV5!G*UcOz= zH9%zKRZ5JN>?3GTRWePray--=pVnfKX_Ohf@hB)1iXqhevzr}@H-s7mWzJ)|pOS2b zJz?yo%B#MHE3wBbj<*2Pi{Qpfu?T$i!PkB9q)ZSsbdUZ)}pniu&&>81EL;K_Fj~yvzd#JlPaK%d2db`c|T0 zS_N|zrPY#bi$ffE=En6%Fh$Tj1PL)29}y%O$q5oP{1T_yH8KO^CthU?qsRhMecn{k zOn+)3vp)T!lSfRn6l?isw>a{lzY=ZQPSPH7`h$*#n)Z{VG9hCi=EyGcsgW}nxQYB~ zQF4u;n7M{m7P8NIk;CjqA0?G1dcg2cCoX@J?$aBbnljunx&Q@PJ<)pf%SNW7 zPbeBgVx_#!d5+<#HyzurlgwCG_o zt+(Ei`Sa&XeSN(gdE}9D_0?BP_wL<|r@Jj6-296#zA!IxyHiq9TJ5t@5CppS^t9v9yNM=Hq1PAyV zg?>e^1lk`rCH2E{BDlEDtM3@?8S%pf&4HsIoBM z@vb49Ad?g?Tm7L!d?B_ms%%i0g&v-pXE>zN1IQynj}R`3sbRVI$Q?+u_r3sP(PfO2 zD@}iEzK^H8WP_uOP$9Kcl?9WtnxosQMXK`kIC>w1d=pj<(GE~DgO?gb2+uj~I>>SJ zhNrk1d0P;01Toi0eq8Sp^`7gT;wcE^)gP&XHPKO6AQljRa@b%LhFHp$v+SAsI+%vrRc<8yOgN(YmI(hE7=j8h9ub05U9~6y4 z5&T>SB+`u&F~)UF6>MBC?_~MQ@RUI56w8?2#RV|tsBgd+V;&rk3w1!@#H`Y0scjM& zn;Kxe+CK@3HsvmI3~l6v?quFOyn+0B+2JRtOq}Gx1at$pl&R0B6+y^KUwE7}=!qiz?fA#F1O1O%WXi*`)Rfve6rSm-7>ihRohF zTUtV*r7W_SRQpBjm%MD|#J~RaFS+&BTla2G)Ru{B`P)jW-*Ljm(LpwT;}{*khpsV1 zcV&O)JS+4A(!=ywRE4EVz=kixFH4`72RtjDJ^BCV3LPI%0e}B5MFg^e^HV7YoZQd4nZSEGCwzkNGmXn*^DJsMi z3LDWS))cx#59_RY*GSXuDpWpOzFn%^ZN3l@*kBg_g$oxN5Bi}Do)qmOwwKjNtS;caIz^cFK=U@0$Wzm^ zWL;jR9MiXxOzf2?myOLdR#g`ErP-x2FfH0V>+Xr+a^WaieT5iuKCXMbT&(}@6(ce< zx=Q(?5C+{&onG8{@Cn63LF%Q~MmsQ*$EEkd^`U6> zQZmlHGJY4oqGbI~B2(^B#YPn_iq3p`lgQWZc3fE2@y728Sm#mP=x<0KwqHOx!6Nr~`1VQ3Sw2HrR0)n>uL>ky7Ei4JU*N%E zoMCJ^fCL+Gy67O6l-9QFnI{_~6zz??!!Ru@4naV2GUCJJhU0q49VhpdKkMs)k)0(a zI#m3^l`Lc6C9MhLDMd7?OM-D1g6s>EYUMWedbi<2-r+fI?K?zSA&c;gB za%|qyddxJ(;0^Jn03q_2!lQWBiNXSqRAe6n8(FeZ_uVo_8vQ~|$a04SMhD}SR}gL1 zS#=By-PC3=2Jc|KFV!!2hTJ!XYoY@5vpT;psq|O<#=E7N2N={bLa2;wS@xEc^f+6# 
zrJm@lb9$V{lwndk{?C%F+Zt8>VCAJ~Y-DJFTs9^{P92mg5y3wgB}%I5=OD?%Y?zrO)x$wdZWyp{rQc_YP4?XmdY2&qaEv+bi8td1umwEH%$+_pA zD}VgsALZVA@0ID(r^`3reB<8Z2jC%x%>V9pzcUrh>BWs#8?W4IA#Rz~tDN)9vx+=HK@Qc1Nt0v4vt;b*dnIm)5<@~(=_w7l^nk)=cz9772z|qgi&C)RYo`J>9&d=T z$*KCCL&7NtMy?tbV{f3454lho{ly{^l!%zB<|^7KdRD2zkSgVnWmDATE63^&0cSs~ z_7H8X+vIjbzA*xzbP#GrW%Sh_I|O>0UK6B-YiSLgN2!o)C4vI7LoOTDo%t@po$Da1 z97TtMDeFFWy!V9h@j2V@m_zij#KT%p7$D?a?`d$RO@j`EBb z*c-K_I>shD<8FVqjN1jGbQte2B-_2=r+~6cY6pa3UuqIqSac8&HclvTd_=I3nY33j zOEKe|^quaL98Wal7h;Ps7ekXTAmnsUS@<83Nw+)2yFAkbx2yP z+K>FaIKBC(h?dkq+>_34s}EXMM?|T43O*S zAQ@%tf<>ndeSPOkiQ zQJDPO|00>%|2}y+{zh4le4gC)=@u!fte0`!6O3Vne%5KRVJ63#$U(f-YgBgRme!ak zKyMLEF|+Aw3uR?a`S&t~Xo1Z5aOvJDLX!1(q~`gOX`CFi2D634EG!Ox>{pVVmX>D1 zwP(+sEtg+@xg2}!u`+AcEJMD#bW!2*6TkzTo11I?lE)nx87bYmb(67U$C_jMqDPM& z#!F7VcWi8|goLyr2o>as;|Z^*s4%bAyy6`RZr8pDyXQJG2PG}-5;+qme`U?6~0^NN~drCAz|x3 z6hFu`q?T4(cvi891H8%PRuis9yDdDq2=yIGA{QyKBrlrcBuid*il*^>s&W$o>kX{c zxk^Uq@tL9m6hcBTO!B2EN`~h=efSR~6GG1QVBsJ53&(oxf$<_YJHpcgEY^_yJpB$K z=7h5nevb0t4Z^ai=n8T61!Nw@sb0^C(SV-Sz9a&Yk0OBQ0Af!cQ^0=Y<#D|frlD{P z;JzrHhT(u~Y|WF=LBO1-uCD%mPVhQZLCD@0EA!;j<=f?yfhls?5t(xBgl=*||0H=~ z)<*eYNsfEZb^y^FjAwE!hg|F!U{q<`P!lN$(cu!@UB?ndkg~oJnRdU98FyU*#tc2- z85>=Wa11R60a3v&*-_VG`enBXq(~T&ooAHz!x=Rb5nIRKD{}Q@ zkzeZTl!u+D6C#>?=PU+TjMR|n`Ok@*@DGt+sGL0NHm8sott(w#243U{sY@7#DaJ{d zd28T0_#VYLSz`!=ruZf)O%(qk#G0_^$W&dA3LJvX{8U-l)Z=)mSEBcm#TVNMbTQvK z(blNMRzb}co@ckCoH{Tw_4yKt&wXLIruzp`07Stpd`b0WDxjeQmP99%S zA^H7(sn5sf{&<%B4utfpUJ)LbEED=?NM@g5vNCvxytXMqitGJlWp0IWP<*g7S2h$? zO05#-pa4H9uc?>kW^9x*o|rA?J@c(x^U4D0n-XP0$h}dj`<2%<_-4l&V6Zu3aH_7Zf$vS3GR0IhpD6HX1e^|tg=F%`nz?{ufvB?K8F?FkiI@?WRGjJE!^nq|MJQoNzpk)dcevAR*ZV(ys8&zz2={+vo`CJHky;pP1&nPLP zbxLc5m}BE{;2D{aKCd4E(nQ!fBs{LGUaMOHm*>G5tyLP7KS4HPvCstsCDXQI71HDF2=%3;6VKyEq(^jxqy741_-Gftl@-bQ-qUzZ0d&467rNs9K$VzE63`* z#(Pbjg5&-!a^}BPo)(LI_*;?pt`m9r6p=I~;pe{K6!)SUHZ?e?-+?S+9e^0qJ`y?S z0qY5cM={6|HjNx6teSKwWC=2lZ5X?$rxK+yn9AB^TcqXmC#AN&L2CV@^cLDUzkdRG zd3i>k^UR3>0|rP#gFdUh1>_icP9&Up-g;jc?~wn<)Gc~XM6B_gc`hS=Db2;3rK%}F zf&&9(Ot%E7NIyynkA7GhqdJ=$bdwuD=2uQ=zdoCmB3B3|!L}sk~<mfc-+UKmGJmIqkI5jJKIQ?B&as%MCXu$=O4&*incKj{?NmgT}_j zT^3_vKMJnB^wLX)Gz=d;+#JUnqeqXHFTea!wrtsAUel&2sp^0r9BEm0nF)8LT?BW* zi_kWoTQWtA2xnTIS78V>AxV?h6v(>#N<%Ivh<4(DWV5|=4CBpq@9P2QK>@-O?Fm1lmp2}1 z^34f#=M|-7|LkX+>n9wIyf6A{_C4JxhK6F4yHrd?S6@#>u-*gLg7?uITrbx~$UBOH zsqji@p#DfGPo8jh!pbpZbU;8LsC(d<6U^B|5e#8cCYU4iHT4e3BmWW)0&Q8|Uz{W5 zHT$3(GEewD$>k>vfnOSGP;!F@Kj9qG4DUD6ZM-WId0Pa>yrg899O+_C<}#EmEi`d`69w;lGru{8D)* zKUuaafux2@djJOq1u;EyV)g3P`&Jl}=gr!PF`Y1Xp06(mmv7SRKl%=%KlpF>N_xZx z$ra-=WpL*h`Q3XP3QROCYtYM#)rvSL(@#LG+!Ol z1H#A0c1w__E*dKTxqO6NIyS?c_tRzD<*BMf_Qm(xyirAyS=CI^YfFqxHze;t(6@+oNAMJb~#>VrV5-^0KtCx z>8DLdIl17iwF;xEIo|Q`@No0ueMbYjVLVC$NXB&BNr#~TbfhsxYzh(>yMrraCoZ-8)A)`1O=O7Oi z#ibAVVndZEDRIVI&32B(Iq>XbV4%u4&xisv?8kHP^b_aWY2AWi(ZRr6W8Qk^1g}F8 zTT80sixqjMQY?9OzMya*9`40kcj&8hr)v9c2H(L8J?C*Jhmi5=CfHzYNwc6MrA7EC zUW^SYhz~yB361o4XAUC516>lKQxc;hs&I>*aJYIX&?2?-yNeV zbwa5APpStkx+rc)@8$1Q?&YaGVcn;@1#-w6RIqL>YKh{Q6ci<|nwC{WP$6U1Pf@vb zoK!UjO3Q>>MNWJ`Wawr31(g+?hkvuqM5j8QUtbV~g%P`Ot-0>T@J@2i#vbzTxdpPl zv{uTLI5xIJ0a(Z+QHh@UfPS?rg4rhEiRYQK_7L9P+Iol%fH8tJwe0W26`3Sa)mtSp zuth#xoGUq{HL^6PTyFcSK(6_$OdeR1DAPOtTGmZ^Mv5lhFMpj{WD1RWgMU*PmJTcE zQpYI4=6!Kxz4;8MMr)P7Ya5%4UWJGHuc!ByvxlXd9R8m|gc9qh&>#uWHHGxP8S4wp zd5-OyWUAYLqNMwupRbWS-d`>c=-({W)hsM54t?wwl1+&AZ-4t+laEaf_QMZ9ERR0= zsL9_cC@9#;E4@3gcxPl}m_>c^^Vw|XQ;A4TQHOw_8ob(<53uSt)sZ$DMk^R z`F|p_9(KHq6g4BtOM3t0V50<3X`HW9VI3kx$UPQ%^5r-m;F`V9W6|z-_1i6`tqLBF)__3@WeU#) z#RPqouQnl1#}i(uOt%5aCm(Q8exvC>oe;WTme+O#dSra4OR_8WKmkV?=AOwV?SKX& 
z3yZx5T_VT_rWlzA$Vj}(vmX(e{8z`gjFyKo>~S`z2xsqvq^K#ih?Ka^Azzw|=?aQ{zrv z-;(M^d1pnbe7s3TeochzXbhI@9ZJw!L4H}CoU0`H%5j;J5f>&;PtTIaRp(lyy3#qi z{hVQG5)u4^BLdPrDl^W=j$2RcW4zDw&E8XK-mLl**GyF55vsqDzQk_?s_UCfEfHFU z;pMilusF1_U$j6hoFhhzFu6Na`zEiAa8&ZJZ@&3vx#gByOlz;UfRH^Zi$kEVx#k*q z{PD-l_wkgIGxz43Z+?F)PvvoXWwS3;%%6DT2~*JNPy-dhi@=jSFX=w4+ z|DvjP!5Sxgn&Mux(IA8t;Ne1nKmq4v!o5_Hfbc@nsThh!m^?Qsh7vwUxELNj3YL(A zMdj2Ze(!`^_c_TS%?M)%x2Zs`q#3Vts*+!F!y(EjCMYFX#sTN1fEqb(oDUFc=lR?l z3GTum1u97rRU9;;H)pjwRMg(tNd#*Q7;!dUTUJO z=vIMYi-j{Kk0KRR9XC~6EO!=m*5$NZk;YkAx#p{N}$3DIYF{>aWIEF7|Z?(lTkP*`bNdxZrB zwCd?@3pg%q!f6bn?L0OH%Zb7$$(r;qV$dx$WK4%5DMyR)CiQr2_%% zn-}K+iVGfbD!z78(u@b5P&|~N7f%rRbLD&OA$BC?J4HVTRO34?`BQn zdDGu0kzewf6QQ+vhGYDtB_vT+i(VENBy+$yPG=0NlGC+5BBXZDvz&L1o{AKzPEx?-f9J|smy z50pO~(^Cn0jdY0*lO0v{vUYog(V0*u(1GS{Dv_yc3+1_)o8*}p8>Me*lmzZ$J2cW5 z@t*(o=$`V6k)4eb!xxv0%`imU!ouRv#z9$i4>i(zpaIc+YFd_*RMs1UOJRe+MqIe6+h6h{(mgyeHIV7AAX+pp8;!@bGt>Aq0 z<;J>A&b2p*hRjkpiqFY&#@j$XG~sjJM3(F5eykIgM+>qyE)aSCXpxtcFjH8KO36r6 zUYzHDmpIA}`QIo&e9u5X=RINYzJ`Wf0V0A2JYE&HkVyANi~Rx8A-0y($V62p+X{*V z#_9ZdZstbw^Y0dTrq+DwD1g)>Xic$JEG=r%IV+MGedw%l@gXh=(&@Ceq>bYmgeN)S zu5FzQ78ZvBNdKUsH$^cg{K+A?6vl*jLvp=k3kAr=-MDKbGy8_eJMs^YES_n)Ww4GQ z+l<`Nzl2MZ<}~;hBFo;=b1CU$eR=x|kq>_@^5(@(@ik=L_qo_^d9_1j7@ZBg*Q0=7 zsqcTjJhQM;cGNVO3fSZkcSvAJr(h|mC!gNrv^;z3QpbB;TPn>prBa(TOq!yJ8geT) zZvCP*wvPmdMH%n%@%@wK))RZnsIIXxWp%!MwmeTtYZ~ReVVz}mR*_u&@&Z%mW<*A; z@$5e~dA&UJ^*VV)NjTNPdn)Or0=usmfN<9~IC{@lEAvfH?WNmxNFxPr_7UiA=xl#K zbAY5ohnU}pZ71rzoH8)Qkny&HQ~;`y&sbNe_q18>f4eDkOSrs+g~h>x|J@4r`=2cc zbs~iQvdbUGSWPGPN-&{IS%p=G5HH?VY8K*4#&j`-&*H;^ z&38is{dBQaS#Cm|{H2Ey?B2-{l2uS;a933nXHow9sl-_N-b_YVl@qoX(C|l0ZF-}O}E`cX$!GD~*ecHb5qAK`1Ya67vrbSNZ zdgzP~Ld3t@-Xu{$E*YEF#`O>57JXKup4r7pt^vENR$c;DNXwsG}4pY$Eo+6qQ zwWJ=0>5r{)D=t$P#xj*H2~Gslu*;krYdTZx3Xo{(fRNGz!6vNRQ>Vil5b!>yJ939; zD$-Ke<^kSo(vXm$gqC}dzd{-&+-}IAUucZ9M5jwb<|HW_{#)5l6(Uz0(N!Wt15Brd zwnX^sDVP)g`q#hY)?07gyE%d9e%%-1TKa}6CtmWFuzi)qWnE8_Sv46_6BH+r0Zrl; zkti)H=kvOrD^He=mVeL5Gm&>+Dgj=bSE;&EqNz!mN&a%?HCmJ?K;BDxP^>J1_Wd#N79zVy3F`N|X^itG53TMux=`GV-0CXO(`RMHnlpH=;rUjKvo#Ym}1x zG2Id*Iir`zm|r`gvgDUx@h4Yo^mPuo9ixBGd0t=7iJYykjB$FPREM;(FF9qj!D?%` zbZx1V7dP!bCue%*gDSyP08&)!SS9es>vj~KWA0MYtH&lp8&5Y%|5^XmmtMnpFY5JO z>v$jOf9)-3j73Aw;Zi|qDt>zb(g3OV1t9~3vLEII`%nXs8WalblN`Cn%Ea)DI1@Tf zNZ~Gl(S|;`q$NFK?ycn`cDv>oC(cq{xC7Kam9=RMI= z5NV2Cp81R;-_BLpLWnm75O)ViBn2?1{9T#QoYvcKDVjp!r6wRkB)YQ-X`P3@#nhUc zq@{GTG-gharo?`3BXXwHB#n?Y)lo9P|6k;_w7gD#WF+e4(Fvd)l0r%H9p zj|}M@0_X#D-zeYi+29y4FwocRkafik^2EX#`A6txYYxrd8cA2*J2O|Z)^|x8Yrky1%!z?T;4h}5I?jWG8 z!`i%Z`OmCPCfw(pMcJmc!}BvY$(*bb6TUrnW3dwMMyXT5f|c@{wS}@SztSwSEdI3a z!lH|1mT>URO3Wc2-oiB`z|VNFA@MA}SX__kn`HVm(#B%AlI!ayc9&BJrJ90ir|4f( zsTqZ%>qL)p?3Ws!?urDRmc#mm)#Wsq?VCkoA7oWAYColxKaGj%+ zcq)N=3dztC3vW81;)J8eWH`B79zeG7LJ*>jLW8%O{4V+uQ+$jbzu!ZpD3a7W3?A*U^1C$*0Tm zj55|Ls5ZRrb0-~val8xURW(a)Ws3|nDalqvp1Ic+V2C2sVplDUEG!N; zAOoqpfpHfj)2?cOc!NTo)bya3Cq*!oBpcc24V78tJx-JS+5l;xi^VQH<1K!Hk{vNf zHVpWqG!DL0nw9id4!cfD!n?_T7M02abF1ZnIhFFrj68WtiCRKlD`cD0D)b4ei}yM~T2$7qDsjn%;yRhXty-R4R4$uaQsm>TW+|v~nW#K(aF6r^ zPgjynnq6#IkTmIf_JZnEZ_M8&ug%?J$SYBP=MGCVQBR}{u}^+^o#}Ud>Yx-kp?{KT z0eR}c6dBh&!FanpHF1V3abI6hVG61J^`n(03W}N@=x=m(dGM>X=6uxCA&2|E&sOiU zg&0L`XRLRKF*;g>{#JyhvjWfERMO@aUL0rsmJ;Kn@dgeR3gmKJ3k!>b152_y2w3PL z&Jbl!4iLbC`sJ!T8I&GvEUpl3a;s_mKv=o27n*Q#JlC}NB2OvVE93}|-PK2SGZs#Y zebhI$n3fb&*kqBva(t%z{fz#m3Te-z2oqjL1#<`b7vWE+?C{H-{?wgE{Bj zxW6m&QU!{G$i8VEYFjNH7-| zVaYyop{<~jJ_^d@RjLsC0@7*P>nPDCa$`4946BNojAGOM5Gs1+0VKPixK^&M!3h!LMQovgPiuU%PFWw8Gi=@lPr(&nxj*VVLLWS#eKr3(V8Mp&@F zY%i~sY3mA&(Ujb5Z<0w=;MBDRrmF-_h^~p@^6QD+rBhU}OkZ1Q!n-dWlOcoCqD<7_ 
zl5IOoXgT^(zm!O$KRu`9`STTdru)jnU$2wNtMjE$b+N~%tlx=H^Bp1v=c>Nd8gOFekne&hib+vu=BT}<5@ z+`#e*8SWDMW1@Thh|V%PGgkiJg+t_?)BDSxPwFFgoz_ogZ74KEouV2X59=?UK#2LI zK1tFyCEAedU54FaaVAU@kMXYd*;jB5yvejD@kOL6_9!S+9N!&^vP34|^+TU*+MCcy z3=bT{y4MLJC*0@s>Apf0n=3ze{++Hw3Qsi!YuM+Qzlfavn8=7L-MP4KFE%;j>%LGT z|GG%7`y7PM&VOEH`P)v2vBiD|A$4o>DkLsE*mx-R2IOLo$cQtF(~pV((pGNKYa$7~ zMPjdT^0s;SRZ=_X5((KlL*iDwB7TKy z#HBJPs9>4I>c1z2ZIo0WBV+cCTB&MilFGUU6Y2JY!MpBp==cUi8 zU-9R1>$pg{@2mla6hm4Er^iT2RETtr3v(aDL6olJ$M;K;TqU}ts!I_nU)T6!Z!iUA ztM$1M9r%Tk@Fm%$a@wF2CGnZ2qY4GcdL%`fw7oNiq#4J>bsz(km^`1NQMw*YieDfWKU<$9F(z_zJfhrskELV^3YCO{{suzyxEDxPGNIFFZ%kPivDPy|DnOth} zQvdH4gJpS6nF$@AvZg?)RoMJ*{x-Se{T1@vrX9v>PA)b*v=bwO@|hXuC!06uf2-Z(4uRR*QC7{Cih`;4XN<3FY>NC*=IRmqpHg*2yKO z%@~Dkk_S4WSAk4_JCw#bg0O+ zW8K1HSHP&t+CfV&473Q;$^vOFTrZy%b&{K=R7!J0orIRG*3WeA(XNU$XTvulNBmA& zuJ}|MfAyW@kG?}nRJ26p&iIi5*T1Gn2IPMvlQKh08>>luI?3p+agv~sRU?p?= zvNrBl=>57VDv2y54a_?NOy37*e=Cvs-^q(J*BhPWfwKq7lT$Ys;&}apZZfP(jM0a> zC4|dgPwQvyAF>NUjSmkty|PajkZfwzunql&f@P$=qu-Fzy)3)TgmC!VPQpR>q5PCNL_q(m;dpoM@Wp-Af$u)X^ zagI4oYu@$!4yuS#OpO9FgD({6dz#3ZPl+6Nk18&^C^mzFLJl`!+>qVYz&;RX2ov6C z+7Lm;_c**;m8D%-AR3j-;{mnU@4$mlR@-2VPlWpIEr=9BQA5G1@OZN^%FKFHB)Pw% zB<}`UJDR1WrbYV1cTi>aCxCIUYqYKdnb z+DQ*e%Nb8ekIrdw<Z2j+Tauk^mj?RlFb`W=wZT2$^fiQX6$-CL8v%}tUX6=eEG zXT*n^d}t!?s5_I95GMaPbAY^b{-`K#VPVmB*cM)jj?lu3d@4YnL_`gXb?3NHU7%y-$R3IE z>GJKyYscsBEy^+f-mHQKE379R>0RB9)fMh{*03}wsj4?QOoU7w)jQD;5pt0(9@9lW z(eFR2#GQQXX-X_;@j-s~BHjNVpRSe=jTrVpWl+r3uOj z9_Lnb!1ab9)?4R_toq0axuXCZy+^&tF2s7!1&)_|+5bf5KCSHN8{=6pJ*`avHQlF4 z2|9UkyUqL7&uH<*xx5KImCE~`>RdAl)**^Rp@kRT1?mfUs_;kngx&240#plSe1E6P z5_6;rJ?Ck;L;JW9SN}Li(g)MFHOk7OW;rrFK*9nK@H}ZJyp&cMKW&3d(vss2jW6j;PEDEs?_o5Hb{gWl8u~7VTRpxjR z(iZ(rQNPROwXJdT>bz`I+l4w6^h`doXQJsg5~3t5B*5RCw>`11Exa&OH`SCW$JYDD z=w0dUDRxOi@=(c2JxyL;)u5z0!bIp5Rn$v-M35}rT58gTXlY8mG`h^34aLS$P+rq0 z&&}KY} zlV?k992Gr=Zzn|s%Q?f+WvZT=NT|`>;-pgqq3oeDv~#pvq=dg)qUuimu3hR+3>UoY zka*6wC;hR#!S^^uwCZwr_mOE978VB=mSigqHp#}?T(3l!3dtWY-EJ0LLWfu=vD)I1 z#_P;>3N$TMfk7^{2Uud$V#ACefQWM(3Wd;mV(GRrBP7Cu1LfU?*-}y0U_!^<_-?C` z6zBUK3Cl1!JqRR(mIqaOZ3w8-2LD7K;r$87_U>}PiT&F0t&@Yj;$0{2jegw8gPc%S zS})=KroS~_@i~u)EP6>F+%}PtjUwB=(}$o>fvV&Mt3~M9xoNsncn$JxJl{U&CiIW$ ziKTMi#s|F1(J>T;j`sVxc<;Cv1ZKgE%_1%UQhw+m=_(L!*~H^JKtT z?u~~pEXnSGfK>0;>hz!HmAg<(cMH~g*695WidHQC>y^Zonrh%VrL{7B zU7_(F&&ev1$*T*@_Sx%;%wm3q5?w5?ghctGtfpQUd&e6Gu=vu?5%f}mzFh@UtqPOQ zvEi~zKZ8>eicW|ux!hn*RXOaKjyFcy)viLxHlSWR7rZ@DUSB_UqrrrLS)|4B2)e*vhZbxsJ8~Z zo8$iA9NV1ZL6J$fi=+)}UCFX&SJ0=FVmPVMp~g$IufVW1AT8P`XB$>7kd`fToVIU2 zp{!?Dw8+M?X6X?_d%Q!FU<0b6*EP1trqUm;FS`V7cM6nT*g}662O7u$44kw6PgK2S zToqB@F09fhNJ)1$(%s$N4IAlhkdW@~Zlp`Pq`Rf0VbdiYn|O!&e$I2=v%ZWU__=3h zty$|ISAhOLw|#rn1>L@)KitDMZ(YOnKJepOscU^ZF4Esb(Iw5}nq1Fo(*idLhRZEB z<~mP#iGiYX_~QwVNr_= z;~H!Ng+hD_b2l^puR>O$zs595IClmE7 z&ErKIp1h>*B;Ztk=!e2ousu#XQ1tJAl4Tw0JC0@yfePrbKQ9OSQ@@L-dcHj%Ecw8# z0N2(NH-+R#Ws_ySqM|}JHMvB6&M+AdFaa{Bh%!42fTqk$ep9OH1h9PKf+&{ZP_L_s zmIiY-bnWP?>3{Ed^^Xz7o>E}NPR0Szx+SJdWv-TbVG^_uv<@9e*md`@q~sjtBZOG3 zHbV0-b_WsYbTz~lCou^Zi-q6kR|=XAG9f+r%y z);o1T1H3W*x3Iw@@Mddu%An~7Qm83c#Oy6kp^*z1lAXrlnC)QKY@dHFszT9LHB~e= zD3%?R;K2+ZitYF5`ENG=u3(8S%41&kyQAxL@d4aYT)ESgLT1$ z4rMtWA{9h0-8D9dueYjzqdz7QzmActc_V_S|4wDUJjB`hk1z8`f@bIj@j7K4vWsxj z=lkGj191yk%~Zkg4|f3e(p$Nj#Oc~-utlImGJ(EIA+9xFa%42g8N6l74~|4?41nTg z;rgw%*aZZsCGEpRF^4qW_#5hrBRuKtWlINm1jtVY*ma%;!^<~@VU;h#7$R3=+SHBF z3~h|O$5#F?!4{$}_5xka0PW5Qaqw0?Ccz!^TP6nQry{Z4x)ey$ukGAFHCs%IIUUv= z)BL>RGCFcU@yps!x7Ng7;>S8qP^MUa>(u>tTRC8Oepa7En>A&I zY|;QT4~fca#po1sfFR&EHJtDYvC2MN^BzO!l)Fa<;nxs+=)nWSDEAi@mG{6SJ8b=A z^yIZ?GiZvJA3(mrl?+05x!+DGnZC6mOr!Hn-k 
z$nPtkuX3IF#?^d6;IXPP!`+&BFJK>*emx-~F2)lcNG(qm>P#&cnqlYfbVYm)%0b+& zinsYpJRkJyYEiOQ_k<|VX0!G8{jcWdI$fw4g1)(4aAbtsa)z(fvFX6aU72dEJx*zn zG+DG8SL(q0O#CydWA|SsL4Ut&xA7Q~t1a|y3wg9&gq7Xm}=X*?X`|or+~Gni9)DrMa8b* zFJGgM7r8{5TAknbdL2TQ&Q1H=n#6ViZK_4N1G9ofViWhA*+np|Aiqx`AFp-c;nvf@ zC|@D^?+G4q!H=IH`Rgz&Df@%=s_9JCn-1ZKA5#)x$urBh{2(sHPe+a7ms3-V!yAJ7 z6f0v#QFbCfxx{tKFp!Chf^174esP@Q64|L7NR>gMoGtwm(r$4LJ*9!4IPp;D3Unb6f5thG9)c?v;_8NMS?kCat zH;!kC>RVrz#GW4=fqxbF2<3tCpUviYq|%+d*<6d_10= zByPht*HA72p;R@AA0^zFm93LyhPw@tnRe}-^9>eTNYyXNbd}`y)joF# z^1M0b_EX7|`~k(p^HlOhrz-fGr0lZyL|!JRV!UnYBOHuRrKN2~I!agKd*jRm?T)Ga z*b^Ha(;4Mmu6avFLBPehRo?8PR;cx~NMH|#ab1OWZUH01;hb0D&>a3|bk*tVi3_WH z!0-CWqe2s$lCHx6c7&MhwTT=q)AV9PPj`0wASqImDSCmmO+>|iZB5794IF5toXSCF z0_SbqFQ{mjK4x-;`IEa^9X6>XS`7o9vZ8aq8+==CbVs_6Ua|}>kbF8DWB{l%-~S2BGkVuCEW}rMEa*C zs*_63^n|3TZtk>CRPAH?)p8Ae-|x8s4&-iJy7O1Btf#9J;e?+-?S1~e)Ljp`)Ka?p zH2GaBMaR(b*YTvC3V>xHLuq*hC-EZHDxK3P8EL2D%(L1IPC0buclIQ*b3H3~=r6_y z;o_+E2VZ}x4nUbIhtRTB9WHrChu|X6=uuKh*vIb%M_(>vc&S^h2#`1?L4;M=S_ZbBAB`D}+6c=Ek?+W(br9F92&MZ6Agj1> zR2~C&(C>vw$!gqq)I9M6Ia(ukoi8o2F5WYJJealMJTsW3PC zpvyg;M8#9f9cb>o+waan@2!c0o4hC_M@tH1Xv$Z)

m zI{VbWyF==pH>2%HGl2LY*6HP-l%H*@MIiAkQ)524kB~ZJB1Hq zstUb?gNSFF?8{Se{inLV8O1Rl#FzbG>Chob5`OYTo`(uCo1Q8Cn%dLDWHj=hfQ%@~ z|8yc)9O1UN9A%6j%JN_I&bu3^?{7!D#TFaGKYH1HFg4pG1pQ~uarA^;Axnv;eQ&`S z<%UvW>R4FBN1{LbGY5CZ1Oo2t#}Z~>S|9XvXc1eP1|xgRbY#+nHIo#jcmsQ)tBYq} zCb#g(8eUZr@0m#{_z@k1aU;p|SGmh?Io5SGNn3z7S@U(ep%@H_(+?={l_WNynnVO+#1QHev?s|%}HPtSVD)5amLli@%6|N+g+<(+xx>HxDzg^bNYB;d*6yq*Vt-# zh(3?$OEZn=%DKJsqHj|gfuZ<a=0%ZaxLld!KjnBB_}bnlv){kH7o!X)!OW zRd;);s37?mqu)I~_&CFLn7Lgx4K#!t1n7QwIw7Dc%hj?foYUWCdK4AF9dr~5EnhPuQ zE1yLtQS97Po=XereBC9C^G@w!lj}b8+6E=57eu%@AB1-Wu5_IGwQw!mn(>0$x8s;9 z#sMu@u%!$6XmY=$NYMS;R`xLGrr@>ezCa>hD`wO669>WJv-#5|1I8x``a<<%;b!=} z8#*1VwWIvc8S6cXR!&{cfaUQYZ^aZ%>JdSTZtZePpV@`LinXn^`utvjme#B=wXNo= z>ErXahF*J5Uz^`J*K_UlJj-4RKUwH?IyKAHf8r3U&1EEQ&VMd2zU;aAr!bw~j#jKk zjLcF~n=q9V$Ux-ejy!udo#Wlx4S~d$*UP@?=^5kOmzPg<$kxqYYVH?4 zj{egh!7~I-+tAI2F(V%~*{C*Iog?1lA6NWfEs||=?_|RrgXf5IO1Iy7GItNYK&1Fx z{ky%%`g?paxh1;cUyOVFZY`Zn>H9A_juFeO` z<2~-OX)x`B0;g=27ZjO5p!kv^gW@@(l4whhQ%Pz{`us$nuikd|;vX{&x)SQ7VC(f8 zR}zEQT6_}M=eUU10=X=wND!S2yj}%KFXEdSZocD?n{FYU{pL1WXlr@7-WzWpJv+Rv zsww@nBLAWj!(yBLJ;HOkKLKN}!=54|t47yzQ=J=?WYl?@Up;X2N|QwXnXo4u-#c_d z<#zewR9O2zB|4`pFd->F7v=Pd8(7Bre)G`N`s=%JS?C-09=+1l9TZDUqB;&EYU z_O=MqfBe&V;)du!_i<30TBzUjE|W;`ua`F%y)XDJDLDhk^$gI{aCN^m<>dx@d27Ga@j z1!L<Q%>Qr3E& z=(XOq=UMgmAD_+$5No`%YEkD7VayYMRrXly_SM-}$0@X+SdeSRP`hRHoNf_%gfw>M zg6cGXFROqV*44eMn*RLbe`*aA-wy5ymuea0S0o52EaAQM&#v)38}cuZsCH=o`N64t z+Ugoz1&jf!}>-52`I>!v#>ug@y4 zs?jDPq>X(Q6sup8ZSx0hnl0RvoZEVp4ESrVaxIa((FZB>%S$}DADZ{dRWZ6-Tsu>% zs=qDWNYd87KRz4AMA*Sgpls1DliM?ZWR^+2*Z_}oBVB!PZ`7_ErZ~jIiZ>pcaZ`5Q zt?hN8T+mY4?Gx@C>G;6yWBK|K9| zzauUdUM$PC%!5tmf3T2^e4Ny5{vdwvAB>``O$xn_XV{|GYD)oQBa->xDpAWvo#Qm@ zG-y_v4Nr0p$qE4fUI}x zKUkZ>s%8wmjyVbp^LZ`m5k#~>#MIZ-U8Tr{`RG1cUZMA4=)7xgdGJa1A+0U3VWp!6 z&1zjCsbk`q(S&nro8{UP4JD^qulS1|_biR9=nca$jextYRad$7&uH?WHD1#5uC8b# zU+J>H?V_4UQ=%kMY3?J0HPks9`*c5<`QX>efOejAxBuOnDZ}pwL8?>u!py4xOPL%K zUf(6h`~7gz2dU?GSDZeKSCHt5#SdFg(P<0JBD`G&L-h=Vvrk*9G$jW6u9s|uqZKjI z?IwgCowpZck*(6=4D^MDKP@hO;f~j6dbkH1=pl?CtiUdrEqgN`nc=t z1!iPK8z$a>8z!f~eHA}kz@4jgSvf&Zbccv|UjXdLFGdK zRDN*Z1d;JZn=|QhFb7SCy@%N!1v+~F%cYya!OCj4&c@t0e@<%7Gi{4}Dp6mh zOF1l+@~A`QcpiO>CYiZu0tE9x#KVxxmD{jduw+)p!NCDs`OK!J?gbb+Q$Ra@6S|hz z0C+@uA8ZX4`D|EjZf~RC7GN+WfO>NUcq;M(cOS2!C4xxZHs$`ZuSAOv8*9bst`UWhNK7)(00)T=r3d=$t>1xozlZ`rh3tE3W3` zDf0H1q;qZ(uX_E}8v9Bv&d%Sp4BO$47Zhr=g}BYS}C7Gmf>#E#? z2f?O3+bs^YWJOVrOW7EiF$^7fdT;?oa#f#Z5xIMYP6Dzp35xUsLs&{$~CIuE9d z{bJt2hUoY}jZF&gGL~8jn3<;TpBkfO%(I+U|Gkx{@~c7ZN29UiI`5yoR*Kkj3=dX=Q(O_DA!~fZk4jCwd&W^WAuHE{zY%R8yOOSI`%UqgYt^G? zv2B^QA6pOTyMO`^eKzv#y&J{ywi^e_R?;9R>66K!UaR=$R0kXP0?OzY$!xmcpX|5% zx|6G;{%KiOlPh9R83T5CuK|!Hhub-&@Jmjf2sdvFPaCIcNt{+6qM+&JhSje%w%Gn@ z5=1*g$=G94`p$xlTv}JtT#lBHH~!sLi^NFcTza;92E4jR2S-j#n^n%Xn*nVs`xX@) zUng`je3pSN_e;JflO#q5#siu!oS=(;f zN3Gm^6(u#BF+28U<6edJ`)gHJzqA&wtIBsI+fzBrjHU!*_P7mW6Nfu}uP{cDyZ))2~~RD0bY_*;$<@ z>aOY@Qk=nu+lG~ECssehNi1QiU7M+ZF3zpa2z$oT*lmh`gE;k()LSJcJn;R0!LT{P zdHWGpkmT5lsiuZtuR+;cj8AB+pBVBjg)e+8`w+qQeB0~AFZJ2;Npa12xwlpm&RFOO zl@PJ)JwY&qPB=Jeo)Z1?DzE6~Im+utJENH9!(nw*83B&r-DR!N%}anW_~Q5PC(A=c zvKRZA+A`zO7uE{Bei;3FA|>RCv9AFt=`w6lk;WO%;`{)w(F&&r#>R|*5d~MdBjz99 z{=DRbGxe#IcNEmdJh%33O%_^Suz2P4mi+3Q=W959`|)MEtecj1WSl!{$+F}t7M7gO zQTcr$+FUlGoV}hKd;VvYeWm9;8mzHagiOW7kJcv}#z$Tb?s#EF+~Q?l98Zrb4r-ht zg(l6+CNL-%s&HcT_4U>2>q8=`mM0pFRV?ZPTp9PegV%sHECcYn@Y@W<3oJSq%=9Nj>~{50EHs`#f;JkVxW8IUC?H zGmojF8jnFRn|d@cT)PWzq4hRT?tUa*Q6JuCb^3Ys2OELuF+E19`ZOvJc}KsHputO< z-*!evoeRak)K3$d-FafxX`X*W^QEA`c$pn_J-Fc+{b2=YPVejMgKZ6Bb(WTwqb+m! 
literal 0
HcmV?d00001

diff --git a/papers/atharva_rasane/00_myst_template/Distribution_of_average_others.png b/papers/atharva_rasane/00_myst_template/Distribution_of_average_others.png
new file mode 100644
index 0000000000000000000000000000000000000000..3c2d1057b91ccd5799a12a34612c6fab85c1b2cc
GIT binary patch
literal 121838
z+R?<@in5%Tl}iZ*rHwvKTzQ=9Ue&Sv0TJn5Aw0Yb7``h z9k|c-ZK1=w;WD80r7iv%p?ZXSt$dX9Ty_`(2w2iwte*pRMv*!{%r~tb ztHu>|>$}VfOK@X(H*!o)YXC`86n)<9j%+iV8|$t~s+Gmb9ykj|E0xX$-smFzDa{OO z_tzB>x>|&Y5MW`_^LCwNA-p=yt}IQ-$9qWJAGK@?LI(hF7i)^&dVvVZs&K_$X~HAq z$?>G`k9vJ@xqK^gGa39x(`O>abYrZM{B1^rheVvKc&rMY@7>#jj%zW>!M4lA!~U~r zvjm;b8bb^fMrCUto{in~ygJq785DHOce+azDG*<`<1G2973SSuHtHAFyV?~*Uuy8I zWa}t>aJ`jU3+zp(+-iRYkl2NtrReFWi1R4EEK~OF!FTra*EySb6W3Z&tlbUjCXWFC zV`*zf1;*VRM?p)4v76}k(@n$2RP<X`fZ-8vSo@}} zjcUo)o}yxQx4l5y_5(0z-~UdAXg-Qh@YkE~a20Z$QeQe0W`bE)Gj=!9U+<_K^ZBmF ziTx2;TUc&cacTgA8NJ)Xkb%HR3&3l$t3I>$BRWcT3xSA5Hu~EY3|V%rjfjYdD}+oi zIGT9B0GH;W54_M=+wX7)Jz#s{In9US!QqA!`yng!k(4R(o~FjqT*eTPOsGFsVgrNbB=E-cHwTDM7!jTfr5x1`JjB=*wOk>u#I_+r=49zUJE2^o zjtFZb-DCCCO#UxlTVKXJ=~Q7IN#hnhT3&{bg5g1%%Q>P1Z8%S5Fh5)*H4MD)#o?zi zwbj+)ARk3l@Lz<8@cWRCw-Kvuv|&>cq7EhUo|cA8&vrK+DO1Q&2qAmANwv{tZBn5o zzIr#tS&ZGHRhk*QczyN?RXDxW??5w;QBQ)~MafoQi^sOa!Z02cjZjy#4JVnWLb=Vh z!nps>NBm}eC_}l4%5ByBxb(S;Dhd5NvxFzh?gD{B6?t?1JltC8J_4l3s+xCD8yJP< z&m=$3U1VYyfvJQlht1rv2-CXO?MJ6~ZNYT0tiU1a5SQSY%f<6h{5#V(oIoR{S5@>l zN+IgEzn$7#HY4uMhWv%br{ZZPBXX`b*aS%=eUCpHdu6CT*bo4Q0I_zk{D!SdTD%Wa z5@Mv+*Nz%-#{K;LtDNRVaq#f|(Er}&9(lW(ZMenh+(%%sVB)>_CMxPa$a+7NdqCvd z&&N-A{b43doxAC(Axwy3u(z^~ElTmFFM^GD%0X}O{bdQ5kohZ-~&{;GkF2vi5#9e{Eb(7DlQ8vM&HJq&2oL#}bV0ke+oc zFeOM}Z$JR?=ruxrpq9WS-GLuV@c)E^)*YqN#R&iM~BYHD_Id`{Fdc_+{{CNG#msrPk22>Hk%BU?I%ZIz)zrV+s zR&-_UZpNSwAm})e>|jQu5GTK~oT-m<2|CHF&Ts-Lht;z^O;Ue$@~em8duF#1@Ya*B z;JjV$9RL8Y$4w|En9?v0hDe=jzSr^?OVlKBn2|N@cGMdHLz{tg$HR4O+T23UH!~5o zSMLQT1t{xRowWqBR?!*8{Y8dW+X6c?^97!R&eE22C3J{Cbe>}nS~dVH!vir&T> zn8|eVJ1@H38M#ZQ=`)HOA3!a>i#I{47w!scF@|AK1mRVlBzKmZ-Kp=unYltW-{EAC zT;tIsMOi@C50@2*o$S`yH~V$2MKq$_=T_GBzK}n#J_i>)zVJ2nXY=0Xi6=O@q~!VZ zejl-U*zVS7MNp|T{YZP%>PA#GM*%7*#LayW2!=_iN69dk*ISIe^>WkfjKpoyJMZ$e zW0-+=ffJTK9|xqbzO|HT*pesuex-kiZAGSbJpuqK!TT%Y5$+XA+O+ivVheG$;Le{SN$$44ImBm zC>znL!mB_UGB118BfFCA5ARx7_^|(x^E@%8&+_;j=jaWAo||p)VzzqTv(BzCjH5~5 zkUBc5$+^Z^Utb61aV-_}(+3#w?$B=uFko+-|LF(r@4($@$v{4!=5@AqFg6>GXniRe zbkaBftG}(M(Lo1qaVQ2qOHnmNJ-n($Hjdo!uxqcJqL;dyraP|X^5v(j!UAG74|*!T zwM=}?b^nCXwY)0WRn63kf?F-9#GqHlSO$yv4L?X3?qoJYf&YH7VtdQ?Yi7Ew2~OJ? 
zZ*a$d8O8V(Or+?Vy2YD0e#9WTpDp0QT8eZP7ezH|fzcc0!tQVu(=kPEO(zw1(3>-F zzkQST#U#?+^Ug^apAz6Z3%3!2>)%fB&3}z=^LzX{oCw}fb33ftKQ&P$qgVPUz=g1W zm$w=13P@wc-AUE4#RT{}v((`JY)}?k7u98#@HbqSr7a(+6?&W8i}a@@EPDuMTLs3= zFMb+cajth>pFkhU`4mqy@>|7hMb9(l>&hB^LGN4S@ZZXu&)Eu<&%|>+GY?l1u8l&!mhjJX3PAY zzOe1CVy(0!$BNhE<_e}y1gyd*FZz3Q+{Qp6T2x2R6O)@sC8EossOx&gmAbizsq^M+ zecRT+75GgKsF<-Aej9XNw83*0uidxn@1(Y(`1}q=F})`5JXt>E*(B7vdwl4XlHjkU zVC>r7t<3)Gxs z6`mX83eCL%Nh(*p`25Yc6_x8>@hx87F6X|wng^-SjNds&HuIP$-kF7I5O1@%jr;t#1Tb#0EzKL$q?>nYUT`mdD47XQ`TW;)OIjV zr0E~;w8mraXLLR?b4lEf4zR!;H7w`6oz)RZ>wPVAkN#R%Pzb?L+G55>XyldnqnuH1 zW;^I>M`}*+OdDxlE~y6irHMZHz*YFYMuQ)PO}Q%Wh5Dqo7=yGqC0-Y4{aUS8{)=-( zVTxgSt~fg_II8E(9Va%givrr*xLVRT{S8?Dvs-f1Jl&>o&l%?1 zQ`WE)o!Wl-Y3Jup`I4!(>h!(UKQf)0ALC6dSQO2uC+~D~+loyI(JCKGHz6n0G--aA z;nN6cRf`n7_<4lhy62nUI7G|*`|G~HM?iaoX23S)RxwZOdb4`1Y{9n zu#Qyc^#K%ZZCi|cyQ=c3$W<>?>>*oep9 zS$qx($lw`qx!?7VKA_;9}ANZlCEgc@jAl}LXw5^F8kT)1Kg*L!d@+Q+A z`U50`(yKTe5k4TJS7_7=y}q?Ql({qCT;YEd45?z1cRl2AIJ8%*-#qYn;I-BppCxMl zmQ{U{)oJdu?eyBr>_rD&M}gC~0{eHu{t{j^hte;H-da03XfY{44p9jF7h`S@uXtaM zM7@k5l81cIJQk8$L4CkTfbbrZ?r@K_`B?V5`qJSW>lQxTp?9$k+ESe8s>fi>7vew^ ze0VtV@H~JW`|JB(9oK=^Y}P!IYL2Q`PxGs_lqz4o!y${n)@OnjCB<>@o?n^}zzc?F zl+qo+Gh6$Kb8|F$H)t{CQB=?Cj#N~PsQ}+wkL+z9OOLY0@@D!{g^Q% zD3zYCPD&kG#*V7L`&l5d$E0dyWi?apM6Hn{!?V{E7aO}Wn8fV%n|;whM<=YHfK}JP zz^W@0-RFGP7V>LFe)ji!hmHv^o0{jMF6=x!qcU7pj-ro?sG0|0K4qSF=ZzvK1PR_R zDi_JjpT)#j=Z+k)=X> ztlIayIf_IBQ}8rAF)S$dmEzXog5no7Zr3YwvK+DWz>&f{Yw6X|{>zR=^py5}n|HO8m)<2dknP5J3Y zg4Xw&!q&x~U<=cJ`rb6E>rF>Y9y-FZ^bc7$w!v`c{~_zD!lL}XE+HW)QbVICT|;+C zh)6fm-7p{x(mixHNOyM#0z-F84&zo4#m z=vV8ye)8wBE)!d_O2lsVI5%JFgagZ%P_jM#kW}ck6J1_jayZK9DU}B9?}$EmROmD^ z-c=;@ra_>3nWcl!$YQmn8$-n%zBNm#K+$x`)`bxDqg=C(Ov5Le zjY)OAR;)_(VZOeF*jrPxC}yySB2%3YJ6e+F*A#mh-5==@%e8xczUJlOCNLC_lXJB% zM8#``r9p__WW7qbN$AG+t*}T{UQphb6v(2dPL0?@Y~Da%_tw2P$)0W>(9ZPKBX@|( zCXrqRAN^ZG(--*wMy5RG+N8>;Jkh~+vYKyuO=DliX_V&>vF3c7PU7|rm4&5#`>@BA z@1D-o51P)PM69aw$@;U5sUB}SpYkey*=q9Z6e}A_wMaCpdb;JbIciS2(mf;~6i_6j zIVSHSR7{DdJrtEdETuEtD~K5~a9?UB*>Z@>lhTt#CzmMLAVhO+f!gVbGe~6g4`jD2 zSY!|PRcnhRd)Do1M`(FCcevo>j#Z<4!vwy24t^g*QC7CQ_2ACc?l0Y9&7pdAL#D8? 
zHp3NQ?Di?pd`cLKOO>4FNSp9-dwW}}-W>aRLhrCGUm1#woTgPNBa5of-_{+|YgOw< zM_`>29N2yG*4oC#Y_%n_r-u*5I$roSSdKaS6=L(slJ~tWIwKEJBE+ zwU`N?IzMQ z3iFkg1_H9aW2KFT-$K*_JuQ)ruI#KoP_^#Ab*smBI+IRvXj_l7ui<<>L}+sj7kz!o zsl1?yfsxEShaA*(i-{pAPrOflQae#y58g`i1OsUGGL>%b?+k!;oc7 zu-ET1_5CL69LqqONmNxQB#h2^__sjYlUR>~Gm4R<^>lBN*t`SO{1!FS9Zig1{d!4G z{MNa)n8s_gB&qGTe4v3vsn$e8T{op(#TdHNRT|kAmhKpP7eiV9motty_K$p;?9qph ziiS_umk%g&ih3@`7k_(6Oq{1PabU`-s2Fuq?@?CJQGS=?A#p=;-=eK?S-JdshUnK|OWJy)B<`Weopq3!{Bf{_xmva#;UQ1#B5xd0!3P%K^B z?5sPVm#^cLc;qIpikJRrrH(|h%8~u=TitO(aukJITqHd~|Ga#-;alo&*)88femDiL zVb6WRUgB2(=MWOK-GAc*$)Hs=&t=qR2G4ITJ1Bt(cOzy<=SaA=1ZW)V0v7~nz@`PAe z_bQv94M#7pZ_7|82PALJn-Wl$4>p@9m&(iswG&t%h0~!1a?ndhG0-np*Dk)Vzn|CG z>3#F6MXH6ohi^FO+-<*cLwecI@CRj3{&oqWO}{wISQuSq#GK(3FY;w5x6phM zm3Mr`!RND$dR7OF>wHxZ^v=!`8C|FDlRxipY`&3Ff)W>m;Q%Lmf@0V&f4akhV-sp_ zX-=$`pvo#n6 z+D*1TdECB3XDr$#{rRxPgxlm0KDLL71FijM#vC6jUb^s0+LU@?r@_!93s;kkKCyAm zc1arBV$!9}PQ8&hgAB9w{Xk$B+5>qCX&BAypnr0kifU*jW+X%knF5{6(U?8xsFG$8 zVy%XHFwL0rAC?Ln5=EM0Q#$BOwc;d2~nZ#gm_x ztV{E|6+#KFXT+3rKoHN;VCZG}V&1}fzFJL#5@O{wztH#L4ayAEzbc){ zWpgyq*)sh>s)wo!Bbi`W(M!M_OgQHynctI<9a>XK9TclHLE8``d2(MX-mwG@@i+WDpJZ~5Fc*x8 ziyRp0ugbRP53F0~SpH=0HRpgrw2sKNignW`9=w$7voShHHtoomJCy#hA4iHjjC7-J zw8(HpyF@{k!PU0Y*W-CknmM&AwCDfy9?t0~>#t0in000YLbnMk=BG|^ zPGrf_WVp)0R%4Sqw8-$EG`v!PfkuRJU|^Uzc{=`7{1}Z7V#hQGbnD>_N$#&{Ek80M z;{4&ARN416P65el^Mq}tyo)T@!GpvqK*XXn*{ci^UIH%v3T|oqQ*PNnuW%I-1Lr+Vc^5eLNHJ zNiMN^T9u%*LEKX8M;35F&+pZQQ{i12EPEUxdF;UpX=oYtd}loK7X= z%2{Qg63{-Mn4h=!-Ui+|%fT#IolTB4^as}#=llEctZL-;M{K^+pqqTq{J~FUw^?nc~_%|7oDu$Uz-bu`F>*cF`TL?iz9n%PyAWL!uG!ecwyRQN3&oPMN!a~QQ#BxBip+)8FDEzEku!=@Pp2j$NQd%leZJBw8 z4*71O1qnp9q>CM5;xrq~^NJOdA}MuQBqp|`K!gGFm;~WlEy;fMTBa9}rlEH_;D*mH z{K+2`bjFUcw4uWeU%+Xi?$V$+g8+Ii=UK7- zY)Zb09$%T>R_TlMH8kcHZQQ|s`*mX^=9;gf*D`9qqN0UN?+#O%PJ1oV!!os)BS*ru;!`Tec`l9#agj`l{i2V$rwkC0LlFY;==E^8)=Jo9S-!kn>|8<{ z6H$xI#9=MfAfJ=0Q?Q%Bhk-KY4rv2*n+)LW)RktF#PgFZBAiQHxmCX}7xwkkfuXT^y&T!*0FkOBqq>3O9KUM2rzNyWQhPPgp-fs=iRxLkqe@W=HA_}aLR|@ zS{(7631FC0s6{Wdb|hjyEBG~o(6keYqxsaHieeZI53KbpP~3$$i>z)-n7ZpmLb`SsJCEl4*Ya z&(^p);R!!82)*xA`VORTO%hMy)M4rp5}}CTbLWTroNXh=;dSewGIz=yr)zgevN2YU zRwB!Q2@m|Fh=xm?cv`VoF zp=_3r2bI4-$KJiO%$5Mo5c=O4n!n5JeyoB3v4OUJi-@4TrnE~;x6O=L`RXsTG|Kzz z=Jqk7HkMs%$=Y%s0>NL>?3A(+TMFfzGSYMH4K7_c4I*e4e^F)&^vSCBh;r?jlo~!( z8*qkVT;j@bAMPuT`wxt#+43p@NczEfB=E_93WUDwN#{*mC&nmNRyTzW*)A(Q%Zi@gdR1iH~D)K zX#|0X0$yW3-!7d5aA0^nfQ6h*UyTXldx}NVC8Q8{KF+eZbRh4plzS}YtoPBW$Z^aa9Ia*)b+#vxi7PgggqULOqegXw~)YAgzO9 zy5#5=2%%I+3POd%VNG`z3Z+cPGZV0kqb0OamdBP5o*#_uB`WuJdZlK8q*?gS8V^6#uShwx|L!_UcY5YsM8*2=T z-(OxoQ$8;`6R7-uftWQ?wbv+<^kuI}^TwY-vpS8;0hoV_T$}}`iBA@;TXki?vUR*R zq&gD>yPC&)H=~PwTNf7j(q|&0yUKT_y+~yXRVVqAJZXt)S-n!VCZ`Mq*<6i+=qM|GO@1M0t^@0B$Czvb zC#}R=e0&9O)8URQiQj!7QxcuJFHklWeqg+lcy>;<`;ceHZ)e$R%AAxuBoUs-Cq`Vq z&|he;+nD?69}ee2c)MhTvEMTg@)2z*dM(Au6cPyO@bJW{ZonE@$keP*!1v6v(zag4 z>^>d1cupJ_9EEEx0>$=Jy8qxkSvvE|L6cRLi3ac>N>;pNj?P)KtexMZ{h?qUO}_q} z<@DDyB5jLMG8_4pre8soDeSxTPL=K=&F=cO%hKV!)IZkL>?NXPdc?Fou*b#8JuMM1 z*-j%n4WUmql7&U@(M^F&n9-g+YzEJ!J88RO%$xX+Blfx6C^*`5Rm>Bp^*8By0%dPx zT()@T|Cy?ymC-E_hnb|fudcr`d7J#gO?&M9>96TNpWTvg8d=tTC~XK1gi5|Flho2f zD&6~d%i~8SvkX5%@V6KxhOiiQF?f>GHnw5d&{}?&?Ta#Dj3%|zB3mWL>S-1cw*IFh zT4W+(CHCMy&|311d;{2GL$27$P~NL- z5h1^DcZ^_G%;cmUBeOzc)bX$k6_{ra?2xa#(RSA=7cv>N5SZ`SYj$zEwS|fg zdb7m+TO2uPZ2DbLxz$<-SJZ3*l8_-O(p}zg(Nk*nsY18AjA8IM9Rme<|E|MkW2hdb zNQU_QVu7BtFJE9>Nr-#8V!k}4B8gI~!n(mV#jBji2e|aNBN7wUn3w<(6MkNT&l{Ib z%0g~m*}X6~n$kAvAn&hr&wsHjD(&=Ul`|T0l9>@NYbxTHC=T$kX=Z&=``v26TXX%R zzpqpCP_N&s{ZYVh&PVLur_;;(J?c+r&}FZgy42P=OR+%Bo_*tdX2)^X*AxV#{az8Q 
zrFetc$4G)*0*zajzKB;(O1Ohlcj}uV0#H5H2Cl}zyxdtUzZ(!?PB96Vw0n*)OG&x) zw&#ODQfBHd>a^^G4FEDEgt*P2+iIWYYL0%h>x+Pc3R)Df$>`}N$4FaZz5Y(cs}IM1 zQQz(d$gqvtv?*?)q|2?fLr^G8%AyG7h&kDckB+-HqD!=n!`y;5SUqDiF^Yd-|GlDR zM?eMi8ZDY2EdPBOPQ5>VB`XRL*APFBCEdpFFhX**YF+nb=1_B$I~?RR5zJ+-GB{yH zWDi5_%&5!^?86oCwOCq3#~Lfc5o8QfL^uB2!i!NOUyJ8_Btw843ixjEZv@( z0}}_9q|7DfKBhgYb@e))I7;auU|JP)lT5+^x~iwZu08q*Ys4nH-Unp{MB zgZHrgmFNx(Vf+}d>k?mI&PLr^6jvHkjTJu-L-fElrO$j@gnxv~t%&#UAQ1pLM*L^P z_+`oB)aiYyp=gsUyol}nVU67X>UxVu0AR8wy=o(OZrkE1j;EpKZU$R9^{3XiCy`!o z4?=cINJ2kIIG^9~UrZJ8m?)nPbX23V&qn2x{!Y>{l>@WHszc{@L8v%CQ!^P>N`7xP zsQgE|DrBzkM-GWeO4696SpLoZ*Du&%1ts>ns68(%>J>5f-Pha$TL^N(Y3#aBAmCbg zvtv_eZD|juDNIWt2yO*oCx1yNLRWsSV4D2EvS)Oq}4Vx zx!m4?LcCgZ7&uJ=`N7Z_M6X9OzAGl`__45ZfJF05p&Vr&{jti_>0`+-C#Pps&__%* z@>y$F)$q^dF5s4})GTIfRF(ErHn_;x;|2#Ow0N*BMb~NwYrOh{nhXFl?#c#|wO7xn zjw)-~{3MtZ_M>wpBu)v5iRrw?#R!imxuyO~%}8lm_ZvS#f&$yhb z|5b`V?;|qbKd#)O%Vo@r3xO}^CCa@Hq|w%VK-02W8Ns(@Epy30x5q9@DA=`hxBjbm zSAcgpCG=p^aOEO*)OEj1rvv18<7Y@Ud0w=}|C~9Z|MxL}XB|O`T-RF{mRj?4?XE`P zDqH5DxjwW%oQQL=0D0kI3mi~1+s!jefi05Tzt)ObB;m^tRh&Y~U#VYl=Q zBhGI7uDZ&nD8&P6{<1P&EeZtI|1zpV>E4%z2c!c{2{a#zI5{?)r9L1scAS^TK78J( z0Vx~oCi+hoy(XUhjJh@TZaO_=(@?^-$4r~#RHyG=Y$Tz3GxXvmKgt@>S{W_|EY73j z@fX#^#$XsNvXeq2V)Ja8@?VZYGYtS%v*H}7M`WQA@MG10fQi)!wM$&3y12j9Blx@d z*xl6(%F0efkxXo@Pq@;+JZ2?Uy_-PVxyHkoBBb%}(EVxHi7TnitQUc&esNT~qGYu9 z4@_c~P{4?rA?%&np@1;$!M(D4s|Q;?(xy?aNqZMpV+IlgC-+p^aJATPdfQLiTX>=o zTFNI-X8blyJn2i%+#KzVtx&%qv{ufHm*n?Q)LX%c{31Q&gf)_^PfDLJkdQvjhhd!m${n=?OW!Uo7wejn3N;$Er2_a06uK;(TFSZs{1~>aPpOB2$`Z_eg zhB1oXS%pZ(6&VIHNeNPLQfs*|BtJ^_sQt~AmtaQ_d(Mp@3#PL4l^w;Tt#jjmEf#L( zqr(;xb92Xle9ZDM&LD2Gl$pJ^yB=W&4o=_LSAy6tTKh1#Q_Q_v!TYjxpHAcnN{a4} zd~vJRl%In9dw;xGyPs$se7EQuqS!5;{L7y{SL4a}^X0#9KfNa+L<>~RQOM&h$Qvy+RIkOG^T2Rd z7XZ*LMaq(&M7ubTQ|<$#-5?2(p9dR*)DJG@=bv15g^+J&9Q|tW8JrTw#KZ$G?p?x= zAy;OfN$4%W6~1U53&L&`(OpuaKIl6I|2q_0g3S7MzkhtN*HsB{s@6l*$Pwo7+lj`Z z&iwcx1J@#;l-=1<$zj&z}gW9vY&-QO9w*r?kY_AoYrku+>tx5yU?r z%1l~&J?!c4Gn}3DkjLm#eu?l4KL@nH4$D$64iz@&xVbbAd+=q%xuAM=9^`+R1^AWI zXUv#DKPf?CKV?c9*B2bKse}~jh3tN4n1i**Tt3*l&&Z~$G2`ICmMuuzg=TR*p=*D7 z>^d|3TDQFh-Heh<0VnvRUU+qJ8BA+~G`{CYjsg#O=q^q1TU~!4{RL4GrRE05@QCi> zAQSI883LhhabS;9^~b7^M%Ex8snx}LzH+t+Yq7#Fan2=Iej%Hds0@GW6TQ^Pk_E~% z>|QPOFLZkOi^XJ1O#t@l7YKoZJ(81@KoLE#M|{+st{X)<8Y@A$d(WnDVX-ZIC{%(vLQDCqMp$w)Lr7Hb{WYpA&~Y=F|gu1yJij>JH=2c z-;;!g8#o&dT=AR7Zot5guGtXbtgSN-dd|Ojox(R~G@%o%M+W!%?2p_18d#xlb`orM zr`Kc9?WVNTyvC#!Hy`dpBAPZ>{>{A2(c+v4Bbm@BzuMFG10k|YWmb@`*weU9R2Fspe$8e<~*F$XTEtsDX zg@cjZ^EJq3GH@rFcmMZdDQzhEHwFby}-#S@G=L}FutL6foFwoMz&BVoV=^xJ>psUP;^rJ}M z9R3%3TEDtFlb4Y#2vm*7cvhukD35App@&-RXGh! 
zbC!5xA4O`1LNmO!X7+swd%RFPi6(EB*RnIVr+4GT8$q|av=QU!6pHnx@48S9lGuFa z!D3uo!|ImIOGN&q3L|vBvl_}UGEwZC{R?w!H9Cd)Fis0za|90>C;H^!(!%GpfJaK9Hon87M+D>r*#)*)rmH6SVf*szY_9 zr$-64*uS~NX=1BF55k8Q*Bs1(Zjm8sf0m!gx|mhw6rQ@)0c(|-n!YGSyV8N8Hl!Ed zWtUpEyaWZOc4EeUV)37z(@J#j*>yKP6%!jbdHJhf@9C*hpsZAElG6wB#EowRk2h2h z$^`d0RB`8xTDi+m&;YK1|DCI1q&EhJEyk?`=@oV$h{XZ>|utF3Y=8I`s{a7J?S+r*Dsdo;-I zroL>yLOv|9n1`!j@!c{R9=rLS#8P@MpS>4m8iyTkpNAK6E&?%A8zhTuIm`>JhajRs zV+DL`+hosjodgZW5h|867Z+|Z9&ye@eWz1jg>pT~$$Z#L2<~L-hlz$AM|zXPx6kOu zwZ~YNP>90=)|nM9mylkszeN%|{Y-?0{0nI-tLVVp8(Cx>z34LD-~ ze&ku53UvOx7IR_eiHMp$FD+XXM7PGqNXKOf*;d_!0u7oAO=)2P-YA+HAmu}~HDx7l z*pMYWa0E9f^s@UvEhfxe-}Bq82ynadWh)qrx~QprXeWm6Xynk8#G>T`Be*^D>9|pD z1TTI;zQ)IZVYK1TCx|Jb+R}OB|Z+a{X@Wrmlx@Av~*UtK8aXr_vc`pMwNCvkjBw$+hQ;8#H z;M^XYM8sCHw}BL$Rza%d&@hzWw^ z(jfq{jJ5|QX@p*#26=`7hMtD46?0$bLAfbQYgKL7Va@H&Q*mer{zugY^P)WCk^-6~lJ?lhC~@&<#O^4PncJkgr6$YJhW3>gBM2M~*p8{fZEym>=lGZ|%e^MxPc z=3py0&l30LmS1rI&unvh3HUn3`{xjr4ot0MC5h;%synKYwt|^w{4j)tmP~2(MMaCu z#5dimJaU}V`wJfovi|Evb6*62H?0g7p&_~<=u_X=4GVd6l%Bsboym27sZ>Y)vEP49 zSS&Xsr3_$DqMps?nn1CXZ-q>3#PAhm2FU^oqqJ2vHW7_)&?q+pcc8v67LWy7QsMkK z$hAvO_ApJ>^@+o$*?jRF=!6=I$-D$%6aPS@7cE_4Sm6!H4^{kPBdCEs-Mb7MlM{Zt zU8RbtR?RXaDkBSF20R$h%#IuIFGku8Ju#inupM@t>|1L$9esz_Xx|RxqUhbm0=83U z|BYwkwnSWvHfCZ!|F7LUFl>?QR12F!HLWhu%Aw7To#kKvxH9d9U`zx9idf}KCv^Y6 z|6wB|ERr`~S$erz%puRk;)1S+-6k=@sKqlcr8aY4WOULmAI&B7-kkM@p8m5TGgCz! z_I`Wuvi8Sv41nNrO}GMnNzjQvq*y-jfq07ncW7%v(Ssj)TiY^RaI7Q!1uq_geW%6UFz zG#b>jlLV6BxrOdva{`0vVrkV_MaYx|wv*sJV4gAo{*yYR@Uae;{@aGf003Fx; zZ}-P&=l$DSQ4#gSoCzO>=qE zzIl0-34io&Px%ruC8)3jvA&{9m1tmg`HpMpr4J?c>*o;L~ zyel>&MPP=Ha8l_Aypb%xX?;eZ6?K0NU}a8mf4!dXNHiD}s6p{0nP)Z}`DyaVp^6T0 zG4a-hR5z>~1el`5Gq-XLd-g;8%3*y}vYV|YfvI7IO8d1#<7w*uoSE)IJIAbF<9ntM znt3$I#yC||$QRnLH0SA<4|-6TU;Hy!oDv9d6bcqP<(?y9#yigUhCF!h{Yrr%Iret_b05RhJOr)S~)OW2EEdG_8%h=e~x(tawb^@ecdk4EGp3*+OP!kasGK z&uUn1A9l`2{Axh0bH1T`*9=ttQUp*K&{5#RoaCQ2`nNvtEglU>a)l!D`_ehygE#sT zj5&nf*uO%fQKMu1p;3JVWBs@g6aobQNvbKOeJ(@QG(4tALf|_8`#g5Cnzp_$j1~=0RefG{#h|t@KL0m?MH? zig=T2;ol-6P1g@s`qC!phx5&qbSwujyDD}gy;9}*_r}0eI#2B%aE@E`Bb1WHs8mO_#G(gKfa6hoSOA_2i$uE^ayb8Rt8FzLRKE5C z)b}o}u9YwUAVBHlXt5;ZsM88dnuFerp(PInrOk1t093Xyt%ixt?<--SQV@~h=Uz{- z5tfWJHs9*3l-f~FxDn#;a@}IS$?7eTm>lXoBf2Y`(}IYt2Yfzm@?^xs17hO*ZnNOH zg_2s??}335#e`Jan!$7G`Ym?mUt?ry^X(*EmFJ4yin5YSMbiGb;PvbzQf$@#oMzoJ z8IR}Ma}U85^qg_8lu2p~6`3`Kz0G-R16Ug>nvo4isH3I8r;~n>sHg}#Am%UD6 zH4+9HVm?XgF0w!qa=6kn*(}KW%!RB;1aQ7~B6eI+zapJYDX41EgvK~h+sD(>TxnJ^ zbgX5da(^zQUZKFIT`B~^#Ny~q9le%rZZ4~0kL26H9YIxey`QYf@m*7KBJ zSYc8BITh!vXH3S@@U_N;o*DU_nFmir`B3PLb;wU`$l+@MgYLuZ%9y&7VG;yh6=zL= z4?h_WH~JbSRsTf+X)ZjUb7JfVLP+$!RHEiOE^`NpW6J(|qTZ|h?6!I~mXzYFV9t_j z#ukTK6f;&7qKMJw13R@WYigJblR}YxkwBt-m%x$D&u>pUnVD~kqSU1$s=U~Y3lhbC z*SmxbGC6F1=Dn|Q0Mzvvj?mFH8UB8oGTN$!L0nE_q=ToT>};4c=)&dh;|l=d_BqR- zO@al&1)Lh*xG)Jmz5seJ*>(Ex2GRWb-8xpMpn9`oqK?;bSW!_94UI1AEU@i`8c7i-?q6x)(J0w?e_BpMcZ#c($&P z#IiSW)5{w(iIt4&{=tRz1Z@>(Jg}mGiZ^s?s&c~@hM6PgKf`;dDTOCPEn={~4^T

q*Og2Yl-%0>2B-D*n;Wt4RC7Y z&wSAb_GF1_0|Um7Bjm~Izl~(mWWJ3ps(t^rmqtBtb|$3ePEqG|?4~aomD9Bp^%xFn zIB)dL-&OvKwfN+dd&y7D^+K%zrRhGL4@nz6wNEltucjcqX9|yST`)coG>)J%D91#fSVLO3l z)OxVenA-sEY;jEhBO-2=xK(M(d2l=hx;@<(83CEN{bXBqJ>+)+q|7Ik3%?*W%f5g8 zRwQ{HFO+lt?TER9EITN*&fCR?IDFtEv}VV-5yih$LE+XXTG%%y67`2Li;cwR3vDdP z-@oOcff_e2Qp~oOMN9LO^RkftGghcmJFX}L`S$GV_E;L>Lt1>M<&U={Y)$cFbDyPx!+)tU_82wg3jI0|Gb z5s3I1)yL32-sf2U9bmht!YMaaOYF#-F>A%nyFtlC6PXTeE||c#Uh^u zMq&u5ijAv(_x0h~HwB7knuz_F9??sFQPcK!9HuL2cAln&7&H`|%KwmHQy^>__mL0>rh4bqi12b`-#|v)*;1b75@Otx%2-o>D`xdGiN2CS+H97mb zPVXFkpo_mbJtfdJNB*m43OifEb28tz?t6HVal)6u6<}J z=x5P|b<@B7bJ@@4>GfbW5!q>1bZsjuX%yTajZ)0nnUm6U_f3l~Yo@~-P<_dYgtoP) z3nqYW*|0Uj(`W*pgKF2{PPJX)D(foG-;ey&efQm^0Hq;{XsZt{EigQsRV3{tVTvvq zJ{=p>xX;>w=sF1cBbmGax9OGlUneXW88i@to4$_3*1Jjk=fTXIDUC>LsKM2Jhyc*B zX?+t{Ric!uY_R?Hx%P~-qow5?Hl2ThZ*cxJxfGvN{(0vqL7GfV3R!>6X~U47K_k}V;T*{IqO9bMzQwnegi4!B z@jCPrAr^gKyk0k#gPblKQHZb``w5NS8VTnSYw=pRGv9iZyBqBX&7b$J{8K}q41QXC z=ob2Glk-?cT1)&Xf^2>czvwu0YYcLLF-`4E8M9GyOigy=>%Z$!=ei8cUPvzXWouXb ztUKc0QPwPNCq*LC%0une?p`T0Wv1+Wz8^qUW^?0TYKU;M_dEF@un=EjmE7 zr$%7_Rq-5T(O?hQyy0|8=^tZycq_w(%x-+${5YvbBnr1a{+cFVz=Wr(F<1a^GSi*5 zl~fd?a)8GdPX(2UGY&={Bv+QwodWb}D_oU? z^uoeZ716&UhNsvuW*>btVXqMHNLoU%BvC)@a36EMDy?JveK|+FqHB|kP4>rx#Q}H? zxVu}OD}{!D)S^<1*sF}q=2G770y;a*>VOMVqarT(pAl{1(8KiDG?1m50hB=RCC8qs|KV#t)HI%kTE($cV$ zq)+Q%GG3PzZzXWKFlyCo_y|bUujgxdashfOKLreX@qDQF`7pXhze)- z?fKEJjVuk+H%~(5-I)Wa$mNIs>i~J16QEdKW2_zb5{DBF4(KByS~?$l_)yAFGhg40 zCWEp4`HuODgU65w*pSM2b`L^IZ5}K$S^^DxVIV*t$wUmudjFg0apQ+dZXBI7L8ZNKPTUwNS|*v=9fQM}afSx0DY-<;`eYMp0W=fXiFF z&ke)%v4%UE1+=$sDkzx57HF2kW10(6^<2>_)n3W?2B zK8gX7`mc~)dZ#aD8oI#?IFWIpsEcNaQ+|`wla2Vh9;y?uOxhT;x-Wk(?2ic4(dS3D z`RW^({FEWmakL2}A!J34jw!oQx4{s#ksbbJDg4Gx-+X9ldN!YiF`GXNhM8rBCBz*WpK5u-66Q0KQ$abFmQNiT{Wxo@oUlc2DHjPFAq<{GQX^gxM|Z9M zIJ_jADh1+lG%atm4OTST+rEySSFd*Q+)hLhOwLIr#z``^xF7|Taz&?-&Z56$ega)I z@R0sOCy%nYc~*AUR!h>!AAp9Af{u$!RGeDTMMeJw1vIu4H6F7+xcZ|n;K!=*z#j~J zT_WTw?6RAfK-r~@&EG8%gXF-YttKLtQJo!xAG-~lo(v_9V(IRxo-8F1t*BXq0D{d9 zkWH^|f5C3)oPr8LJZwKporiVBn9g+DE~JztGsn3&f`4H&2v<)8f=tedo?jKPHRm}H z0rjILzy|q>x3~+;%%f`~i`!eI{0KJdWN`UkD2wO*b3c#?SZ3)ei3hE@a;M5Sm@=5_ zlF3;{3e0gFF3NAt?I7NE21sbA>hJzo+L7zn1Fcq1o|@!nq1-^|v@hk{#MS&#(>mQZ zMhY^4$|^*P|D_>2BSKB+ZWBpsOT|@}@DwkM58Q(MtYPu&Q8^5%_3LzIM{NFd zljmw~Oe<*(#9L6$IJskDZgB(BK?P)hxfEJYgZXmPodo8~YAZ%1+fEP`_xaD#&?Vdq z)xHCmo0_ncqh8FhvulGd#sM-E9Z)|`kBPdY zOOovTmqO_Nk+9V}wZGCLc8m*%g(HXw6rf`Sog6gyigr@%zldj-Z`7Oj;>ztGq=gNk zUbzQ$i?k%3lu}$bA~jTUVr6oZ3qfQ2K4A1cKQP`*Y*pPaU(D9=)CFrqH=^Rb8!feq zN#&!i15Sy13wLl%&PRCo6#>B52gTQ)4}O?K@D+O2L4>B$`?l=h*Cd+T#|oF{A`P)f z5b56rz=VV{!RB_+C@LyN_`rY@Z^;s>%Q?-gXozSO@J~D^FC3hM@i5P#;G9%Ig-W>3 zciIt(8mkS)d1R+qZEl_-w>VrJM^|!_bAo51$p=u{Z$osT(n{iOr4b;Ym6vyv47&l2 z&bH0WP+mTl))kA2$NqyqIO6l>+gwk4rAfY0Xv=vXb1R_Jt~4(~MRo1^$6RQ++zFur z9PYnOF$~sIVtaCV?`C$>mGBhCv!MvXItN(Ri)$G_-Einm&WXx!Q^fNZ!j|{tZw?um zl8H#o^8QBReKb8Wwjh1-;R_#7C_K5?`>FtI`pb3OOIo%PQE@Iavh!RhP3iNC=Qop< zN^UNIf(#{lIg3@#&U1PpGX>sJsn@!QmW)6dAjlpO-UD#^wf};iuL_Av{JUR|Da`#{ z@SimVpve|!J*Co8k1kSae)Rs)DEuI^)~_`VYT&~7Nmpxn0&(XsDv35ODCb2cJgEEf znF3?DlAZ(3L{@ndiT&)@1Ln|4I&S}NzI(hrfZX5tzeo?A`B>(e5Sn&NC+3^2h8fLq z+vWSt|HlgDz(FS?fUM^e>2EHaI3L}k2vOkO5wEe?G*rlm3%GNq3!js>`yNKjM|PR5 z^KzD(3k=TT{%ej%Ut&NOoe5O%t6<|ab_vJxcSnuM`DFAiJuYp*mQ-osCW8C6kaU5J zAKLg_I5$QIAY9uJOuFe^ikM-Ak_3_%iv3=o5UVB?XmGg9?KYF5ju+Z#qR7X?Lg^3k ztJNJcjpcPk?+>i*i39aZ!u&VWm+@G%Xs;w&mkE`rJUFd{sk5`t;c_n@Sq@eaEK)%!zBL z%oK6hn`D*E#xKBf;r8tn26|b@S5Z=hTr=t*wzV1rxe*~p9kwo4;*8+8G@cgK^E-aq zRER=_epB(C=hOUwNA!C>i2rCD-lh(2ytyQ;=z`NSmzDZWSdH5!30?qw6heSzs578W 
zPObql{Fo&F>W~(haRqg@g^+nrv!-G&$Sk-aXy?q-ZpuJ_TbWO!EIbFya`3MKt#7@A z&+u%?k7-!GrENNBTS_{S54$e^P%Srv_!%gKL$H6e&ob-d<0J!k;IsJM_y^{{2{praeos6^vQJ@>--oXS?T0)e6L5!< zt#1c^miFsH?oKdxmGB^LxTn|sOA$extd5KwX8ym@jUk_+EKxKym?uyw9xmoh9R_qg z0s=8mIvx+KR3d8_>#-MAhzi=CZaNbZB#qQNyKQYM7U$j;;^71YO<<@@@KPTYwyiaY zgK=~S!MoR?drK5Uz5}50NI3?5h z&JF9d15hcH4gr;=+%{WQ%Kvnxdyyg6v{Zy&jTOwooZ!zu7~*#L>eH>hA2&DzqKS{= zSv8RVD6Ok!@&OnqN`|A$oba8kS68Lvxjap335yQ}RZ>N4ASgudfiXQ)J&lEjXo5N! zHC_jGQwQnVVJ+2cX2A3w(h6#Q{^P8YXP8e-#|`2tS4|#>K|#G-jp`9d>&yI z@wD6ZvnuB^yfwr}*&Hny@Zg{g3_3MP6QL>a98X34(BtTZmY$Zx-~*bnt?izr~0qY_9;yBJ);kM2`oZQ^Bu48ERpv-)Aw?yKc4x z_wTG+@uCUy&L!oGp_r(OPsP{B+$ya;sxF!Yh6xUX;a@^Ujs;AyBCyQ2Jv+$i-K%H( z2Y^)i{z08b$UO=>N82h6Q6S^x*+x)+)4D)nUrrZkZ*flKrh_ZY3^|NgK$RoXFBx0e zFdE6$0+l6Pcp>+$gjSC}Z-{ywXg(17`ZgHN_5FzqTmQ+n9prvj>-9k2xeBdKf@04= ziq6Rx8O6={rG~DyQ9L3C;N1^Zl#RKwQrlDykQxJ^^MCDuCFrJeJ;`SK9fCMZPzp}djI<6@0}f!z3IZ1=^OQbTEv(jC;3E0^bsRyNs#=h)SDbO1>{rt}w|_#$_;~by;-hiK;JZPynLQXRiHR8&O+;j#!?&&{3Md$|W8^@6YP z<*h zr^vTYRC#`w&I`>0=3mU^4zD?Uw)jCMud=8JQCeF1 z=EzDcX1+qJ-<)%FdH=AtSAv^|$7J-=izO>qTU*;2*Bv?1j!b5(q1t`UbYlt+$<;ZI zcq7Wauuk!S^Gg*v=S6fJf*L?+ac~ejfg1h+W%*Nk+bUsI(G^WxHqL z`lb0s*@)vuiBkPMv6OeL&BiwQE~@hHtxMhn*VVSr+l{JlA?lKSBcC1c@Yi89h#%lY zdCBD(RUthSh>bR>(i0z|-QZR9XPIE2AW@ksvY7MtMnnK_E`Q{!8m3Zra^=jDyzd1- zhnu&U&sI>R5ccQ3^%nwE!)U)dCe3~_{s?ML1Lx|#x`y3FlOLX1Rg4&Wf4QyNMIgnl1)<(v+>W(jZS@y1z$FX&Xi1Z^c(!GHKYzp`nw~3K&$;e{#>uf@& zMy=)-FX{(|Hz-ADxVDlN@bD3tI1D>bU91qtW$2$OxJJ zl*gXOLg~=5`Si&rI6Dw`Fh$3M2>KfNYJ-@rHG;2H2yQH;bpBU*IO9gu$xg&wvN9=@H zLw5nb9LP_vT^ zL(c8Vu+;J#K@T+OOvT5&gvU3mT17A`>Erz6xUvwHX8R?$`X!9!<+#4S*R5HXon&m& z5I4&#iFB8zi+4{S7K{S3xwf^H46H3FECxGWO-ZIG$v^0H5TCAV$7yI7{&ZuHHz;Ks zjfIG-s4D#>L)z`lRn~3hVF1D?Lus4c+jVRlHTYi7e^Hbs|65(3czMC z3-5}yU21P-nT(i_#Sr3DtQ68S7gD(cQkMq7)JOd?ao!iZDqKjn0vBKTbmfFD#pa4O z#-SyFR@X#QFQj>{2TeD;ZsBw(Zp0;L!ks>aB{ob6mGsC9DHt_V&{iJ0%=YPLVJ=}c{ z=^nAD{I)%u4R#e(qN8)8AZ+Y(CvtIZVpOOQR=3w$6)BD<`WR03i1Jd_muBLN7G6;}Oto zM;v<>cO>6RClfAk;(XF^2lmCa<_-+1cAfR0S{u_5>zV;sW&eYhHL`b!HAFPZEl6fX zdgi~>!NC(dp&tXL!swYwMa=~{qj`7H#m(W{UKyskNuvi_P|s?;P_~-y>g@5B1zqPw zeeT1AkEJpz)qxLo>H9L5+eB=J%$a;RV;|ekhsa%)9hyfWht=JNmcy$a3kG#ehX9h! z&cX4lStR%N*5~idl;19wQ{20N-$~l-o(|AD549r1*rAXzrE#AZXw$q)S|ICkMIlV;HMUR;C?KB4aBb^(<4H$I;HFz@QrA+81yH;bduY%Pmm{?!;AUcC zf~=vF#K|nNG&VI6y?F7$m07m|#cS<-$#}-$$;r^j$Q#c?pxju2a@yVn-p+y5`)%O^ zUXUVm_1LyQ#C2RII$>j_@#=C`fN*|=my0dfY;G(Ir3arsJ|v&dSS}3rH>*^hGbQbb zuFL64Ae(B$H*w+6b8Sain#9deTBzhz!uQZNA*6x{K-*N`z3C5rrok&^O8iCn=qkmBvHV*PmX@W5gU$KxFiFkom%d^Qm6gBZT4tbgygpg2 z)9DA$DQR^LjZq}b({S678BWcyjDj{Nb-R;QW$^?!FW$`KN|Wa0^|3CqBiFqCb7kSU z2ZMlC;rT)aP}nlHkecrG$`Nzh1!Rxg%5Z`00$!`?Wi=jw+JnSK@enL8wI|B8 zs4L3_&CIhNBBn8eA;wepu~n!Ce0R)(Z@ykywEoVa>$hV+}WZzpPC*IIanu;_OI zwhT$in`hJKx>whxvQDboh9bNbm{h(&pBDePN4jw!G*XSA0OXr zVh1wRZ#i~9cy^rnpC(B(a*?_ux60@^eKTq7?xt?{JpKexhd?0tHe^43{4lSN00er! zivsGwU!vCn>3?}iL1AES$FH|V)a4FPkj}qt1}w73_&F=S;N>G2c~Kr~kUWaCJy0b% z^Qq1x)agd?y+3UrX&aFr1*4rPr@Q7gXL^;fzAB;;@&!B*nrPD@?!}YTUDUbEM zXw!(0?zY)zAGUungDCjdEc$qi0zkp@w9>bf^+$4@Z2 zu|LQ7{^La$kLEY;Vo_z8%isWS<}j*rA*lLf$TD>wZ3tJ8e`PIc$SVW?x38*MME8oh zWYW9f)d8V!w=X^}+{LE*3?j1Y5*rs4rf|*#&S|iAq5uOMbdoI|$aU811Uo1X6T@{! 
z14;tyfc6CF#YwPDf0o)jKybf>wlrem?oRMd_#DB<$H&}(AMNw|PQ&!K%{FuN>YAE{ z>c>DiRJ-pA9^{i0#{b;-{LC_b_G*|w+R@t&v^uH^v?NvvvKtxJk{Wkx8?R^3r0yEGj#7kt4i6Oj*aIrswj$9-czpBLtu00r6rHo#Lqn=`j%8DdNK~Zp z=4aM=XeIDuP1CTSz0*qsC4=iuuds_vILOYZI{;6!CVS_%FfA zym|P7Elv{hgD2R%(-z#3w@3b$^jWn-s#>Xg4&ez)a<-w60kc+T$< z48EXa4C!nYFpzl@rj$^Jd@174z7)ksTMvpXdDcES{_%Uz%;#_^-D)rsl8A1D5LwDS z?~{D5xeQfokt>ODVFF$n|H#-SEETlx-Ad8hlDrr!x|RyKy7cC)AFz4Tp}i4p4Zrsg zm2vW9Ts~)##q)PTujsiO^#)ePF>czSHyeWoBrnvxT)gtrpZLcj3t+OREP6k=^4a<+ zX~*7MjTa;H*qmW@KNBjk#GwDs5yPR`LCW!~wOz+Z>B36ISW9_V)%>_#AIr9!os!3$ zVNr{=YoRqDyo~I{=u5nn`g;o z>;Yfk(Bmu*4MgmRWzRu17qovL#W7gmpD7G{0+niGgE(x>`aa0W;>C#h2IW%3{S17~ z!0^W!VzayhUMYk5=~%AnO3??MNHOg6#?(U-A~V5*UnGOqd8Zy#xhAiWhp$0l8TUt{ zN+_-E2dNhrrJgIGiB3TxcSv5d!Wl_fCqixA0Sjy>QK&S6slesMyL_IUS*t_yNui*X zFDOx2pV!vnPIQKBIMMklOnDLkV*)ldvCjdBfH`BKViGzekL+%PRH z`2NPW6i8IR@ButC@`qPQeQMJ@$(?*jm<+nf2A>YT3pKs&xZ3|9tTG19L+Ep0#!)Qi zUl9rh`l&6>1P~wp6oTdA)d`P9bL&g!>peb0h=){%uDz)9Ew}Zgk+gCEzlejEh2j3w zw;*r9P;D$^9!b2Xv{GKm`53!#KH4leER3?n`RcLTuM}8g9J+9lu&oD*LDiZRoiTp{ zb?@nJga$2y+i?L+6uf~$mayf-#7c#mb!4?2eW639lQO1r{h#Ce2<;_x7LwRs4&ja& zMW4@3L<&I$JLhV6ff0_x%n1U}LYS2JWJ-VC_n+9!ZwT_f}rgf7@Z?=Plpb zOnQrem+`q^Oi{avkPRM<2u2ed2OCC~YV$=!?1bxz;w7y!p9ZgiE>g(N=n3on#e^dR z68wp_PB=?kSm}tDGsy?VN;h}4qDir}nb6X``xJ~3X@f?R(k{yF!OXl2iX~)dorQS; zIH9nxD{ijg-$HAxE;#de6NKeqx4sY-Z==pG;S>y6p8H?D&vWeGjoyA~aKBVDCAUo- zD3*A7#-Zg)wf6&cN&0d(5Qb<4p+z2AC}ib30mN4dmkzPW@A>TGrbB8OR)c^52_$?J zk!G!Fqdk=TsOuw+{Inu(GP6h+J9SxAcA#1fQ1S+jNFd%`>70%Q6<}=*`{Zn;5+7z( z_OW8jheV9=O<+dl#(R4I1V>B6YbL#z(#%KSN6sr z5r(r~Q4XyI^VdL^L!7&aG%Ir3_#Mn~zX?CHX^47aM4hmZ{Wqn=T8jFi@D0l4gpTuq zX$+Z86Uj}o=f|dapz{T6EEagO4ll3}7ZyJMY^f6DGqY>NJ(P=c{tTn2@Mh{;99=R? z^OuoG5T};9GVP(-dRNiKX?8~wuxU+*BuQgbtXa&PBl}$M>$s|m0l3N`&^FeRz9Hnw zU8OYR|G^i0ZjGl@fm%`i+oTz&zISwyqEn?F_X50!Gfd>da6IOXYi>QCAH5hMxeehjRuuJ6u8U{S;P8W z`dH1!ADbRcw^wiuSrS6^5B=8Z7{HE|aC9hrsQhNlQWfLu$Aqcr0J^%4BS;Fb7HOhy zjl$@@MSZTo>nG%khLBmy*m*L6mnC*WE6$M4v4XEa^qqtpWMgoW!0=EhR6g+?emk+Y z3)==W!YwvTi6I=ENZkO6nf=uU(7$qxTk}UnTwen>2yLhnmSO5xaTF}6wi>MZr6{VN zDT0eE<2|AsE`xR`21^v!_CPmO_E$J$6|Jl0d$ejvNPJ#+_Ja)t#nw@ajfA{9L=%Qb z3l32I99JHxVQl4AMqvepEYU^VOzn}P=ryv)&^Bj6J4s@m&!`^^h2Uvj84kHZ!V#i5 z&75W12G)8b#d~dUV!qQkxB$~X^lE;xhulEs#+#fDTn``oGDCJv2thY#)Dp`kesqT@ zo2F4Pc#s-KPAd{V(k0I=qRX78s=C}HcES(ZRYKaQN-)(sMA1chJyM@pTl^zt$Q0{C z%}V`%Y$EY#=oHQe6?xh{1GK;yL9pF|UM_gghups@)2>_g+#M)t>lD&t!FgU3pXSufR+{IOY!m-!2++1MV8(z5sVOrJaxFx4kJbVF-i+2tLgSDhIB=Van_>$g#F8I+_ zHG#td$s%76D1s0xM+bW8^P zVC+EVy7LZ-#2CDW$(K4$s#I~TTw96n0GkY1RG&?Q5+ybh&rr1NH z$@y;1y8_&0P#ybgA?+boT6i@-ia`gl2w!iuWOoXaBgGHjH=95hD4~xm3Kc^H2pXrA>8memEy03T%APBX=R7PrDU$YF`-%R{ zd>@RNt!}JyYat)5QXb#nEh}Md8+*YpYV>E2u{rAMj*+@Bmk{v4mUJVAQ6}_L*~^aY z_IvLWA)qZpk8*ZjL!Sokd#VapzWH>Z8NeOHLL43{4bVpej*=+uh;3FCTqzqB6R{o$H8KU~tDxQgd_rM+u28|0m3j-(Ef9^*QeW*vS>V zk4KzuZO8Y0>j!6=p*OZUNu*6B1j+cx-A95WgI|yk$)gZX&K#`Q`f78E;lI$Sy zV{6p0_usMv+y$o!NzS4yi0of+y`B#W?SJ+7-U$xgL@_7QGOgzT$+ihbuNzMP(yAxb zEjBI?CAkt$_Ahw2`m=@IUsx4sv2|-JMO)HUNF5*`Lk|wO3m)?je}#6@QO0K1=sVT<&>CKX z2YN^A3Gk%=vIDkGmUl!Rryl-e-Yr(Y%2@W*H^ro@aW?}&^I9d zJd{{)ZV}{$-dS^%uF-WH(P?lRnHLIen`Yuq0mfhWp#5)p=s7bNX=`!uZs6P%f`jFO zth`mb1z#~%Tet$>JdwN_U0Y@`{Rxnsm4DoF@ek{P_~$djYV6D{HjP;!S1tvDC3K7Y z_NsxrrfmGV&{xe?On%@NWp()KP_#FZm_$?E=RDCvqed^bLpWt8R)81H>B%#>;Q+_>L+(R_nj zJfG8-Z<3PsbywA}am}Ey6!ncv@Z@pNh6xRC4)BI~3P5*ciWvIY#r5N1{D?c;*peCL zpMBrMq_uTVz;?3ujnkS@n+~2q@Bw9o4*4P+^F=26w+ofRBWY9k$swR09$LKk65FCN zD04rlivB%?WXJs{NSmn~3rI?yF?QkQ`rjp|GUkM0PEmU$6ht}aydph1(3gUo)P!Nr>m zK4onzuwVE4!wUPF!-zbR07G(%&0Ew|O>bkaKb#W0m&xt~C!79f9%9Jg{BzMll(BP^ 
ztTBh5*Lat5kwc#~3Yen_q$#Ag|JQufS-ri2wz(%{lPF~wu3)9TZ6xJ8^XDJ!d^HF8< zELRFlc^DyI3`+*X#5MSOjXdD-s>Y5yp&N=BSIn?!PYNNupKTaog?y~5$LODI8{u!C z27tHd+#QK*_-Apd1Z+7}z6GO4g>wY2n}Yyn;1do)BHe!r;3ZQs1^$)$199q*S^@xA zj)f8<)_WlcOW(N6xk-{tlAG`OI^e4%xpZ+~>Bg)P8i~*9Na*d6L(@r+YGKvGGQ}u( zpVOsor3Vrg5`OdU) zln!(^OPQ;#w6(Qs5tWLA*37OJKy8O+!M#+Ie=x1JNha{zDTARM??FD0I%6pcBl|D( zQzL0d0Z4HJ3Ct4sR#N|G$CW>v)d!0f%QoJ9*9AJ7x0d6Lv355QN{)`j^daZu<*!Nt zvd;h;&}mQnrBnOb^srPyfS4I`$^X;s9;Pc*|6XyP)BF5}hH7m+hVNYhM4?5js|$C5 zn=pUC4C2Tm(6Dkj+%9wu=oNk^8A!>##{j6>Ipp4CT=ErH>&GqrXFOPiWQSho{s?xJ zfX4BUnaLw4(^s_9w-hz^bU`Hq8^zerU8CcZdT?)Kc=hSKkqk!-M?_CG$GRgMIbYw< zx2$SKr_!HnktX_~g8glfegP@(S61GwR6+czGR}OCTsyXRjOF-zKhP*1QUdm0!Hq4z zIdWjH{BbzPzVI;KoCs1|nI!RjH)Blz1`WbwWM-aEOJeu#xb`~dsA~ABPc0Gv$7vP` z$`EspfbTO?_cc9_&RV!x3LWla|5YNU{H&slo4FIi9st8@LJ zu4NK41@>q&ndZTN9#?RloKx+#hGv$a%)~55%WqS1BKgb3>!uL4G=m+lwvB z$BLu4Su6RlIJcQ*R9$to1vbYoiw@hxnD%3&Nzc5IJ-^iaVdRIad?P5wzH(#ID#hb) zX$d%yV2PO&1^j~J&7^0KGF&Hm_CvC6?4ljhv6+V#MWr@`u32%ZUs>5Wx9}m%Eq1}E zKwS@v@7TEtn`x%*3Z<^^nUHsi!K4P;YPeT?FjaUo+$a}Ephs{6u>1dFpBG=~F_-mZ z&&CsBFK!F8HL?>D9Lgo$=3vqbc8)9(K*$%PzRV@`an)fn>cQTo1`q}Ru6(of9u>IL z0D^!L@uwWL^D02H^*-hpZG7zSuTgicGPT}cnmXs{ghb4Y$^bEaW^IS|ba%-QWU-D% z>?x%`XZ!Sm$1a1GunK8!k8RPW1L$-?Xo3fgmhk{Ckm3MyL(Ir-*qT2WFb!KQY#2dd z+XY*mcPG}=^e5?J)Ti&cJput399}Hp3_vX!U1$2X5y`}osPn9+jlNgr}LKJah45mI-tlxUBZX~^fmmH=Mngc{W zsFb$-O;kg4=xTs)M>R6ACf4z;0Ccmnv?Q#)rW)8di^DlcKe&A*wP0C~u)AmY)b#p> zse0J@*nyLdwcyP6-<#!zZLtrGF`IPgEH@*xTv?`LZl?N1O}F)_1fXEIk@G<DTYwCt?U$en@+(nX8c;yc*CLO?tWkBmNTk9ANtaF(|a(bO19(9?Ah!-&wBL z`^eSnDA*(V+PcH4(XRgfw?=QF!1MEI*1+=NYEk^>({25^&f)44a{jrS8T3DZYAMo$kJCfi^<3i3Cd4_q1q;Yg4-~DS*tZ>YD z`Bt?6W4v`-984yuURH%tf^!5@2Cj0xIf*!4i<6bWk*Rikx$OIV1Vq@~j(_0t12%p_ z`8AgtS*hQ__zVL)$#6HHmn=jvo4v@?17I%c*`irO@4ox%{4Z(thHr{^{Emw>g+0t+ zLh)Uow7~V%MhYH8C2d8uJJG$%;j``4Z11ZP8c{Dr__0n;?cy8Xn)Hbiv8ZS;w_4)b zZ6tCk-MJUdz3KtYbWK{VotuXf4#?sG0vn+2HyHif4N?C6K7#zb#W(8x-6!&2#J-MN zVkSvvtM4;hjR!MlXBLwF{yV9%rt38Hv|^nyLAU7p=0i}hrSY&+%8 zrv(k3BI^$VkeQiPj7)*|Mlj%fuEa)A`Jk{a{7v$>nTxD_n@5k?EjS&MrW?*d@Qs}J zer;D!{f?qWnavxJB`pS);44e{Zxw%Ho_w%vg2c9*%s;`SdQoylbF{zs?>U&@4Ht!Z zB2w#_4pDz?*qDR76f5mNuo^TIs~r!~J5XS#AvLFAt%L>HIt2al*V6_ZSNSR$*way! z_A-RashfW);TE?eY8!@rU}fiDL?oV-50Zo$;JM#YTq}~WOAz0O>zWK>y#ZrN&|mVDhXcL^^S(*GsMpIsJ>xCPcZdnn;4_ zb3-&<$b9Yk0G!^{Wt^KQVV3qd3yci1!Ob(;NR;@b+Jo2m5<2e5io^7y2e1cKGh9ET ziC}{rH#Xi~T2$b7LNfy5P;hD+1Vw41-L(%vJs?=uh|sjF_{EDwbt9OV(MWOb^dww@ zb-rFi*4-4Y;FiAlVUPziV`M}dpCiR;u1BM;%bBaX(BOVg!%^6I6a7mae<}CBETtJp zhF6H-g}=v8i?+iyz6W$=G8czNRB}ihgU2gQ64}PR_&D3Mmz;Tzm;{Ap#Jzf$+=kd- zxWh&TJW>LTY+z*?A1da4B4~{ix=ggh^zdx&rg_T^u1&h!yBcBVhzTN;%zwbHt{BL! 
zU@|A3SL@%*LL48WMYVUL1Z7c_z?Mc_3qx|Yi0uAkcZ^AUnyS5v0(P2E7>fax8wUCn z#gXYxFg)q2KG82idU{NZTM)Hfus2vYGydgunHr_Nl7F1Zs;|rTFfDKt_k;O?+TcZ{ zmilLNn2+-8-ROdEeTywuj*XCs)-d?dg3s6iTR3>T8wpNCeBaghp(MCi52iv-0kosg z%#zATfp0XXr?=&=Bz_4$c?~7U$3LKvlmumc=5u_6n|5Z|V1+miFWwLQGxM&j$OgA0Nj|%G z_aX*hW{iRxc#!wCFKC*Ybz(=~v@KBgC#>xaaLFQ_oio{~?AG?%XWecfKAg|~QB6YO zz1IE`dWLb53Q;vMi%4wpW>;w-YpmD#AK3=9`NBWvKyIBuYzr#Hn^#EqW2;C2PJZ9U z5|-Qe{2T-~cj2=nQI-Q8Vt3-60q)G|4LI6<5mZ41<1N9GJ&6&(19{jh@XEiB#8(R% z<1jPUB8!UP_0B~gpN%|<0xK4XZ#E?lU;V_>P{ZO}U-U}~epb*&MjBN83lUluc`xZi zYr!_)J}~RUt#c`dPIPekN2a3i%Ry2uNP* z%S*wa2Qr4y+V=Y8<3%{)qP89kQ?r4oT2EdX```mORj3(+&i(m^6KHK%#Gkg-`+K$$ z=rX#Mb2r$*gO06bl&sMC?SKX%*4}md+D&XtogG(=a<*$=3ks~~itk<`#9vT*Qz$@l zonzf~@Cvr-v5qRaz`k`xa|2A8Ztq4_xHv@C20(_bJvcKs#5LWB*Eeazx&Y0h&MLhM zsOLtjT?mRV904L8fO_SSHc5T(FWb9A?zOsRyk(FpKZn07Z)l5hPUi%0@yK1l)SM~0Su4mB8Z z7Jok59)y_-julwbZ4hh(85kjhnH;);?Hp!D_I}@ncRPXlYBvX_>rz+9voT?;s7#BO zV8blawx;a|dfNQGp2rqRCbL~Ba@WZ(=orJFn8)D~ihV|1gp!Kabjr1aZ)I(oWS}iY zP-1%#gUjH_psiy+;DBlA=h%pN&BkH&HaG+vbWRr~=P~$<_rLN#d}Ue?{z_YB$v<=G z#~>NsR#w#pC{Z|+@IMS;uAX4{|9)K~czxAs5t|K$-SJi<0=L87)%F7N-PsYC(ih)V zwibc?wY+B@J=%O5A-=yhhThU1i3j%Ao(X}wNZgkzqi0Ue#Iu&z_k;_kh3Aeg?m%g@XEuydCj!Mis`GCF8expmvZEoWbG`O`)TAxqHY3H+)pnwPr}w?G zSgfg4N0T7+wL|F;C}MSRrx@UcriL@_N6f8<%qEEuZOb~xQOfk`Ac3iqdq%f9AXN>w zb+MxUS2!PX4OVJ-`3+-hKenat@RWjaB8W2$7f`3qfvU&PJ_~Q{ImN_w@Az+ap2S_s4ewzYpH)y!!VwbEA|5oqPvE-hq4&bald2HX62mNDl&jEuk%Xc1=2~y^U?PSU7d(OcWTUvxQw{Bi_1z zg|6|z;!nn>_k9;>s%!vVom7=j4>(9BizH|+~Swb5~P11g%Acg&rgRE;r zkd%=CbAs1n4fQ@lfBU4x^YYDKEKtON@)3KFZO7SPEtz|^RJf2)jMt`xb+x=cNEN7O z+86-qk~hdL<8oG0G#mMjr8#ee1CoGT+O;gsRkK~u^S=lsrp`+#Bm)7iQ~VAh{%2~? zjcV2oqTvAdZ@L+rHU)^PGs(ZqFi?4Kz;g5PZRbZ&g+O*fHidf;<0)$1)1`cO2E$v7 zj*f@Sknd)A310i>qkC0W^+bGEH#R9F_pHJlX8rUje$1k@=w%mMT9InAx|==1X5T!C z_62I}w?LH5FY`Xb-$oIjqH?fLOS|Y`EGketTeHT0N4*EQ>x?f2GVB(X)# zrBDB?l+tSUDO}>9?+3Z~-5ZiaTYu-}arRgDqMw~W zK-r|sC*1F=44Ojr-;A+yQX4lyUXXL>dLa*0ILY*>p-)&(h}wQqRWa_MBoV?)W=8zV zdLCDXDE4nl6QZvFoJqV``Zg5z(vXhq51RwGlZ&4UBL2Z3l~7iGf=> z_X=@yNOv%}@y1Xl>G9q&VlgM7Xz?gjuBgWU&JzO@bF&`|E^i--=I4r^u(7FyzmWa3 zxV|%d?dh?hh3vyYr1YhBF)YDICq_t1YveFiQ3(l&1=Yk&Rvw<1(8HtuaR z`SC?C2PRk%w$hX`0sdXK+~(79)VAH-7Svwri!=+2wH#y+nv9$hQa5+n)_wYoayD4V z>Gx1K^>B-YglyD0pa!ZR5v^` zH92p@BQagh-jJ)5QM=!t!Qrpy!P@;ggV>eB;%kHHSl)Ehh1Qd5kkK7De5t%dhQ?%I z9!#k1Uh*{-9R{x$D9%p2Jx`7XBQnmb*%~(^+66iI zab+EX@j`yUsFFmcm>mTyp{=WNMFf}g+$Ni1Uf%;jn3|E(DHYPR*GjhT#xp$e~l#{wE zSbHC-qXo7z#j|yi^>-h|ndy0<6>wl4_(hA%tbESkRd;{^`?~R=R!|5%%`UQ%D5mh8 zSkUnkMyz>S4JU8Jc_MA58pEGsz#~sMIvoTtai85wS#}BqW3^7~Ylp8x$eJl2D#VJx zMU&lHL8eR*ps@lCAAHc5-_98%@^eff%r$ZtRBW{Txs0)gZgnQp9sP27j??bM?hfx} zv-bHv=0)i!nVXxa(}@D5KzV4#QE1OD+uo`M3v1pfjPp+u<6jHG1qpkNJN#<#6DlC) zbPIXqsc*K4;F(J`T!4Ig_6OkK=S9&DN}N}}nEILf6FP!fkJi0Q18T?mk=pUHpWA-?tIV zPB0j(S}b~DS>`-W>&IDVHsu~!%opaV@I&IXjVUMHNbp5`Su}%)hSaVn2ZM+!IVFaw*`qT4gawe@V1FL%Hp1sFs()sk=T1BJG3emo*ji6-_4uFLMZ<`$?x&+j zHgiJ)r~21o8&d9Tq_YXF<@1GiRQ;Kh+i!|_^i@v#bJwOnJe_w*m$G^GnQk=;3W8a6%T_Awr?3PU!q{&11m&ag;(s;vbRR6T+O{OX-bG=WgJdn^2-Ej( z=VFZo2=p;iI?=|ykf*kp;J~i$;p@Z;3n>iFQFmV$ji2(OPpK8R0__tv=cZ)Oz1p|E zf2-pGMevrJfX3tr;z2di$vt*ws{TZZ=_f~+@C5kfM_li%gy7C6%-Dt*3F9k?*a^e` zPt;LkB^ZWplSP8Dncfc%YWUD%kv3=Cm>=?qpa5|6G7MQSCXx zA@3eWHzr_P0d0lb%B|Xyqqu)F?V#%HVlCYXW7^c@C8x^JXGg~uF&yzAza=z;THdv_ zk`kE6(Q3bNzPr~`*jd}V2Nj?CX;rlXvd~Y_Bjdbgub4xfi$54y_Uh2V*GSzOqkD~t zXK_$U{@9WjJ4hmRdMk^%bwVc98H01q`)r>%)bdOAT{{XU9CARZSHLIOItt5*nTQq< zBL&(j!g|)1c^$)~;{Wbbs$0G}(z5R?P;Klq6q9@C#+3xWc|{k_Pp@c|XY0edMCwjp zj;kis9>uvkz`hSFT<}@eaL+Wq!F#giCcEu5*<<6GjlG9wkDY?~Y3KMJB9WW-0Nm{u zE=$?E41-Yo?npADEG(Z|lOeyu=_wqqAJXEDPLEpE`lL3N^M9o+kQI!H 
zyjs0b14SUS^6(6o87C(h0n3Z}#Yv36jTs?~q(K6=p1b5MLX8oECQ%qYpRwr7*da%} z6V}1S+KF}EI1oE=?m*POE{iQ6I@K%Mb1sYSg)o3f%?PBoWR?`4*j*_;o3p~0FRg!H z$M;`RAw4KW3s~d#>p7Q-b-A+>b==@LF>dL|!f9`>6$%GklIX)T647~tP204Ol$8(E zmABaQlrc({XhPjB2WgNDqRV#P?{=u1`<2|k3h<;23)Ckc^w?n}ShRdI5;WE)B@;Y@ z-!skO;;&#-_DLyHpyMWtVOnz<<>$h_A`L@KeBB`^NNN9uHSV{%j1?Sm1xONr-D_DX z7{YMqH|YSjO*RT@DTw%+qZMxJtz%t-je*O549i#Pq_PtH+fGM{V5|~A50FknQu09y zh4N&$i}h&*WvJ}!`TjFc#|AFQBTv}D^~Iu+p<=p<-LowU&L2{K`^~^;x+H?XP5p8TMV{Bg8IUIDtD5mU?8F+z5JbI%9*)vT|UlqL#siC8yZma3` zbU2swB^T)6LY>F^5!cuih}#JRxsMj~l@JVYx_X z+T+c;MayC=lv5;Bcq=wu%kg=;z;t?NdBLW&jFB|t%h2GPy|gV3tE#!F$_xsSwk9^* zemuJ#yH~-zuQAa`k-{5$$abAX#s2p;mPt5u7MAB*35U~opFGygJDc6F zepgo0DXXe5=;D=+Mm5aGB(oUt>c7$1=M?kZ53_Zr$tGH+eBe=)sjX~(jkM0$I>t~C zL{&6E%reXM_%B2OLP5OpH3Zs^BQ!WAtd(TT=E#SNoZ2pkO0w_%%fL(y{Ve zV~SwYj{y@Rkw>6hdi*8B&6C`l8&xiWnf+-~=7(>&d z6unWwG`bP}oD~xeT=~pXtZ8|wEG=Nx`d3^~YkvCf`fXjRx5e{ZIPhs4b;%;-U^Jzr zeo)9QD1f43iNS7h!d6{A-;28Dk5RC6GyED^kNwozUqeoI>sPU47NyywE+qS$zL_e1 z4sx<4a)>6;3H=m%L8H4LMoq)g;oQ%I7Fl2A$j{idkya|lHF@c)(vuq$GaGhphIA@h z_Kupc;iZX^mL>_A$*HYhVxId(X0v1?7GyhBDbpdM^<^{SLH^y?MGWzX+x+M3L=a>C zVw2~uSOc0Y<*l_&_H{df`L^BWd%+s|X`bN=mdP?o_8Yo>VQ zHgW}eP0Qf+fbi4n5O?(x+0^?{lXG!fb%~__zatL1|N1UoCVSb^5P5tXvR7M-ibz|f zbhd6&2pPWc)}$z7MTMH@QBKVyDjTz0n}Ahg>hI9?wVyY=C>PlK7!FS)?Ov&*4pXb2 z?pGSGECdl!!=e#IQck7&IQrDY)h(}a)HSO#k$KSbs4dL?YzaswA@0xZoWNFl^}IWV0&z;kNDKl9f3rPEj|CA*8z|$AM5l0>)Qn zxOu!sD_=n(&0_M}m)9$EtFXtAsAc7lUSPYVS+z)*Q+b;%j$L!O4#q|5=EyC%6*z2( z^OReSXQSq+U<#s5&y3!OAoXZfBkx=D#|*+&=otw}NeGY)9qpt`6a@_ZxSXuK@N0by zT6qUdhT{->E1LiOYOEMM@AfESHoTt@;x7;fKSRqsGat&mgb1BPe69(7zdFr_>KXl# zOqqgBy16K3ZW(30`)Rx)Eoj89)RFV8Rvo)4u8)iwKh1PNc90}gL%$@5oJB0fgy*lz zC(PRH)|}371n8>t#Gg`ypVQ%Qt;-(mnY_lce9q}^6M|;e6i_(%fr7di}HfUVgwTsjzbooP{z+*II19U5RpbiK8=wL zXU{o8AO7}!*d$!P&!?C}s&Y6eL%5*hgm>#yBvmkw8n zbnpk?U#i=eJrhrPPt3m;qb3OcJFhz9)`Y+Pb!W~Jf09!89(YKa@MVEo+K47xoH-6m zR&ahzNEZjP)11{0x3t8mslEdvk`BS}q9uri5s1LnZ#ilJfFg=mh(8pfP(Qz3WxqZS z{(ClM%&Lc!G>;TR6e=J9OUI))QkWO%z=$5ty6@s-R6NL~QWKZ@ad*_B(2AAuEHxEO zJ6=MZuqIwk&y$89Da5};5I;Jz-h0;J7N~4MASfv<#2}mB!fJ{4?^FW46xG2F;TMmR z9y{^z)&Q6MT(rI~#HX4zt@i4}g@T|hnnbAWtV&*d`K9uMz}QEjIhxlLC(qU`#q(E# zXgSc1(W0~54W^3!MBQ1J9sUg=P}@B+)}Lcpax9g&=WS>a}F!;n(Kp&CWjR=7ObMj?hNHGHv=#X?zB+R~%c(8E9Mm+T-=vYt&TuqGN_6<%l=?+2r!?p*K;l zu{zeDj|3e(pRFOC4qVioo-(EQ@hQ^p34@Y9#ss4;?1yrby3SjB3 z=&jK){ByaZlv-~dyxxOv#!B(uyuqs`Mv#pCkZby)@cpNpX4jU3oX*zCEU(IvkJ6^V z&amCZ0mA!psX^*=m_!M2z(XBFR!%H1s+WkDWy=SmLn#NTazwsj;H=F+51Pb~MGmI!!9Ane)>B@7E)>la<0}xxOt^C+o*s3I~ZV3*?-8bv0j~ z_LhVgJbp)&q*N5<_i@;hf3?qx6ZS*nSM6)O56Z|djt*baA%jttERwQc9EqvEx!D^B zdvU;D%B>uZmcaUkld?ZJnE1KdiA>)971}cY@KrwHxFp3xMw4u6?vU{Ew+2C54+~Ye zNbtddt%IXuMokT6r{QCQVDyoZk=!3YeB1e|7I*Dg$z>bayWUfVF+$|BjI;zlXv>$) zvLs568L+;?T42g_YluLz9QN`3${8+g%9!zSvH6XMya)%AP`=E~pnmLx6>-2XR-4cT z$%wv(Y27##=MLqXaIEQzzZbg6=Qnt zAvb|-l>6WfLtbwaKl6utkiXWLtnKH3QM?+WWL7ictSFthIW4M;;dSq=j`%&>qTu@v z$;l&NoyTbWrXBnF+wS?L`uo`}=+W^41Eg7$oSAfG{n6}*StzZ?{2hREnl2U}yF78wVq!o| zvG&U}3bElDX66YWbf;o>b#4)Rh}hrcO|kkHTK}Q((QuS7G*P3N=i30pIkp|;{ao62 zOsu}WbjvX9A2ZRCuosz;x$97|2Xt-{krj&k7j-Rt#-xRaf)d6YK)T!u9kb7AcRECt z)|0r#c`u#|j5%LWe`z-Dx|UM87|O2uEL3m~^XtpTN*8(QH8o;jI(kMsZLi_C6=XiSWk>)*C$&L1Gq=YTTZrWyV`^E4G`vos>t3 z??y&~#)#cyF{CBI(e#=0pip`i@aWq95nPnIgntsrgG!IZ$cZLt*5?>z3y z0nTu*bsWCT5TmsAQQS;CpZe+bJjbzWE{=+is@?f>lTITRk7*@+rHITfUEv^T02q!s z#^kZ;tVVO!ZB(F~%=qazE785F60ygDo{KAT_*Y<=xN@6I)P)r=McbB!VlvE7@-Z#mv-40pw zJT6(CF`y?}R99g>x`9%SVhYnlvr35PC?s;9J=b46J^#(_3Yiu~O9^uwz&+qrYts81 z|M{${i0te)ZmRQcFEi7EgJmyQ(rn&8vApoFE)anY@}8}hQFfunOP?ft zuGS>(K8BHTl|vY0)UHl#~v5U0;&K1UsZ{ 
zfZv9BL#(c_x4t(DzCSh~y;d_35XZ3)$#=_4);}Tl#t!FJblw~1a)8^72k*bS?U$13}|J}?iY7skJ%Z(tk ztF$vGnUgA)8(6)yb-MN{|Kp!5tgF5cgAHps%8gW~yRBsZ)>vs+S?q%fHDua(*2J5r z^t&@|DTwwq-#xu1$%u>|L;IywIb-H|mO5fK-DOrGI%zgyZdmrqNMmOc>Cl>4Wrlov z!lu59gqM;Xv8iWK7N|TKAmV{<<<<0z!uYn_LeyuBiz&m$x;+$h9K#nwnNt*#4D@FT zOev@Aof$c-!}3eEGqp>@owWw0j)J3lQr?V)r9S=3aOzT@wc|AxZBs|<{}sQ-ZHD+r zGjE8ozIc(I2NqJZTiuOWML_+0JXiVKbzq*(pII8ObG;;f^Y{oV;Xwt78;3b<(m}a! z2E((u74z+KxxJ~j{HeMTUjl(G70%KR>X`4@Yx*}0$9Mm1^3^$2oVlvk?;)b-s~+v><>`&#isqQkKAk&wuIBTV&op3jDw z^2`ogOMC5}wTtxf`Kx^UfphP?ASfHYkf}wRh?Ql%^t%cb{^C1w_&Zfg_EHL+)rp>& zU+QP*C1Q+@styO!>tAa!-Kf6RScU#noE6ma$Si!0tl=CGUp;;u%sf}lQl~ZG3Ib$z zL+ZOC`VCg4BFQ$O7r5ZrH>rrNAi9djpdPv2xPV27$U9te%=|iZFzn{*$n+%Vo!2Xhs!KGyG&|PjEVF=2{AF52!vP#BMj1?z5SPNR-^T0kbWh zl!aU8SH1{;NhcvOcdtG&Rr>L>mQ5ojO+1e1DUqV6UZ3buxa0QtfY;U3+=u2Md`}(v z1l5o?Wlel#KhzFL{nqY7guya7GOt#WQ zn3h9)lV@z$nq$$;96r4Ejzfa)Yo@EV@kqdlsqo+C`n*k82ahD$k>7bbw)lsS1E*hM zm5|5}Y?ZHvz8)Qf8RskV{u-lS?c(^ycmi@&lvbd_&!TWfq8iF;@igj%n^SdSJ|;MD zi)4qW-696zS@siNAt$ptJD3CC>!6)^R&0<9(Y9&OhZ60#-m7>&oil=*a}tw*2L z`#d#0K%A$<`p=C({ZroK6cNj=Sk}{xoV3XiRUONbpYPaDG4GZ`@Fqt&1d=44*GpZG z@1v(F>R7@WTC>r}W`DXdExeT}QwxfrNLs6+Bjc2{A3ca<-_5a1OSe~e&Sb>0hn26N z5^4PII??lAcWzM(OTNa_`26|fUSXkGuwX}tzuBrr&$i$BY~3SibY)9bsi2*3U|jnp z%Jr3NRSk#pm2vlXn6$(MtgW@Bdb(*md3OM^oJb~Puk1Da7gjp^XmwJVdh)}K+sUE^ zI}Axvzu$zr`}bt{t)hC}`j^nK%va_-kz_?qfMW0RrK^8J7UP)K|p0E6=ef{3~T_o_>S1fY|>{mM#Y5#mDYzeF~4X4Zu zR*kaoYt&++ak=Hx>P(Uz-^9d(Q8`8L&O}Mh3?RuUE+b`o5!+JvrbP&Y;ufUBi(boH zzP7TWp7PxB7!e7xzU8$aZ*ifg>Nl9D&%gOe=!oZNA zs0)^ncfa08?2*8;{OoXOuJK^f;zwILcawn@P^1})zX6}%w2eQ%X@7&)-PN4?aTqw$ z(9X`T*8TgyH`%F{5TY})eK?ej0s7?A?OZ?lhf>o8Kh2NPiWyJSsp%Hu3{5`LW`~-} zB$}OUj)=V~)2OgIsH>@)bx}W?^Y-rIem9t2lzjcQN#F+co!Ntk&!o4s(f?76srzz_ ztb908CI)zk6sBqFd3k)`Yw32f`IJWi^(LK4%6|^Pv?f~B4#gT)Rnq#&;O{;?nqC(x zyCm&CW{;NwSg2csk`>p#hE(J0mv0usi3#W5X$RaMA zHtXgAK`n~Cuy$|;w@>2E<81N)jr;a^F{}+*I_yi%ZH;1cc53CdE{XI`^}9a%Lo}5-FhX^eY93n5lgo-Lj}p*k}SVxUo}v z)myM%z+!t8f-{A{O8~GkIGb@!8?!8;4Y9qy!s#rz&)b((z+pzDOI`Al&c7u+ zcJ+d(SG$&zh3zd?y5h=hmt-`m9exC>EC>U`*PYLjiT*`A_ZW7uJ?qV}i~O2vKstTf zLh-@rl=W2yX&5P|UEpUMwh&u-idgFp5w}w~mEa8=&b$DzjxZI}Z-fD~d3FhsBW@bV zLcNV)EyOet4mLv9&`VGMB;!O3-^NxJmoUByRj-h4Nf?S}di}LB_qSd%miu8Rv*q>q z!Rg7Ej=Ru^QG(X$!JkXz$DQECzs`-$Zz`;`JQ?7z@b#BR0YnP@AZPeCW&mNC_1x@- z3ak7gC{&&fWgS4M$F(zA_PGb7EJN-Q{TyFe?^&|^U(!s&B8tny)~c;`(`T=*#Fx;L z1g*>Y%}2+b=RDUuDF=9cUbKULZl*~y`l~AmFD41^H=t{gs0KYn9_aENmsQ?-omT6V zng9cGgHHdg05q+!M8$~#*{RF7HJX2fMf=3dkPjY9uu=+`g9{+&8-cWd%3~#EM7^P^ zE~3a>0k-(xNOYedl%Doee0HHx>Y!?9_zKd=9F?J4p%MnW~sl)$ug4U+X=I*%PpwG<-zu z5KZmAT|8Q4zjm$+&!Y9XYBU}#(AxTj&2K$Btsr`Uq!o@E0N%kebq3;j@%NVrlb0u3 zo~7?(3RkC>B1H;X>;qpsIzJ`jP}A*B;5ys;qx?z|a)y%oFdlUC%n&;`4 zDN0v@(?=&;V-?MW64L}C+uXDxFHsK5Ly8h*fw*Lo%xNC_z=U0!q>b6S8prkiMxbrQ zeL3^nsCyfT|7DY9=I+Az|M)g=m9ahI=kw@&f`D%E=A0)`L+m6PL>^SXwq=k6JTyVT zdR8>;@q)|Ymly_Fw=aq)2XuV8Vawd1v0UZm$#ToB3rvBxgQm?F7Z>8Z2ip@RVuhEt zS9_i!-`vkON4f#a*%449{cRP7;=+wN)fJ-kI?srp;A;c)EiL9ejw13ks|JC5wQxls zybLD8C5K5+;F0L*9WHljt9dL%2yRbV|0x5Wh#Y5^g=aCSn1P99qG)!$kS;zTT5M{I zpuD*%1j40!(n4Z_wp%AIiufE{{O9)4oa_mB4ZIR6qKXQ|!z=@${8UMwxH+HkUmFcT zV=`juaiBmzB@kV*IkusbF^b9^z~d7Xz4)#q_%g{a8-6n+tJWql$u#Fv(}@phT$dqRlV!hDqgh&>`ah6u1Q!NL`D0Cd%t=|*&Q0^a zb^Wo1qa2gLeFjJXj=xnDTXF4LNzkz}Nwy8LmQ9DTu@o(B zOL@Y^DUfs|XbPVLm#fuS@<$L|_Gd~ zuP8W!^P}7EX%{QiwPgE9eCp@J3c@SIW9Ux=t+r|Gkh5`^A-0)sc+5vR3h5wB4jbPS zs=>0~vA$(|@>edim>9&h=~NQRIz;56V~RArW2FiLg5@Bj{>UWNwGBZHt6F}1Hwzi= zCJO0!~X@=1t!|rpSv6D!awsH z2v^Q8bZrZ-omb*iFL^*VF4}x@D54GEfrYo4N(@=VVm{T_0)F}6iy&qJN8-a|!;UCZ zp{9lTS@*rNZ&BitM{JR;43Ty^z@q<9vWU)o7R?Gdl>Eh-`W!I#qxbZ9SPah(I~Zng 
zQRIgz^O(VKh1Gz`o@!p!m|k5zmjxv?1XA)q?f)zT&UDU!m=FWyr2KJ;Tr^1i%vMOI6rPfAmCpI#*qSH21;XCLI*J| zuy`W9W^+4~u<5<$r`nCdLmje3zMs;; z@^x_A+ZOa{DpmpvbNT>S{keXWSLNzg^X*RNv}4%g;j-Enf<+YdmQ-fo!43nzo@i$} z!_Q>J5+tUWuY@S(wVY2zRBLvwcb)$nbv&}LwT>odX2Ta27T#5s%tlBv4V)08w`}1< zVGi+}NTXmp%%n!AA__Ld!|h;8!}kHhaU?hqrNi`QZx`UhMD!_iQH@qb_#fmU7>^qf z1ilY4Y8b)HrWti7qZ*z~=Ac5%>bL2bs(=Y}K?M9|Ss^%`vmlUzO1!DkYfQg~5i-XM z2#NylyW3sr7GZc6pU2)Z|1&!#Sr$58U6FCD3puuRwujb3fG3YgTW`W0IuXQ>nGhwe zCh@=*WTqUg#vOv-ypPA&^O8Q!l&P6i2{`nW>qD_^%1s|w6O;>@c^*P2_qGSSoI_rO z6UAZi(lfD1kyBEr`WTf%pDV>YhPOO#l8J6MW*L4ug*R37-vlA;I~Q- zUXii|!JeNuoT#Nr#cgXgIfiaG^@$g2gF=Q62mip))aC*8nl*f?|id z2cYa>{y(D6Vwc}BBc=V_a8Hs{1eyi9RF`F2C-5%tn0&k_d(MB92`dIRwkq>Z* zMLseDz-sO@IvNcB=7z^wh3du{i#%`@r=noG2H<{*@ZDL%dgjG~WvOnSMaRql=#xeq zGDt?R)j$K+_N#1&eWAv+GnMn$5vWJp(-@B>1kHw7$SBu;1&pde2eH|tuJrkq5#dao zsJUl97iSPpL%8WhJ;0Cm4I7-moS%+(plrO*LhO@7f%f^B?jZR%AE-{Z&WDa5s_ zl1s|ShEF`02yf)%8Oro&6VQCk!wn5^4TH>D8!7~_F}gMANmv+otJgY!!EKS$TbyqI zapp7+FtO_y${_&f)%s^*$aCRgUmH;z`+%i5jCO7pbkAPL&EZa2J{oRapWs6tcq(`U zt^BbC)&WOV4R{-c^M*~)Ow@lLo__bHX|opq?R#X2jsE1yBpMe$@v<40P1k`P0|Oh& ztK%rr9%6NwKeRxfVzpA~Sd>IHpeKRFErJ{ZUXT@0Yzk)Vrm=MB$p#d?Ih8-{CrEz^ z?h5tPRh23=5Sc1!{v^8fDdyhah!59AeDSal{)bSe$PSoje_kqF+vm~&hwic#>x=Ev6+Ip&8$ zBL?2Q^e-(!9CEQ~5)-g;Folr-5h?e22pnoc;JL#Xbgs+C{yuNN3;*i#f~d^OZMV+t zqIMP_L_W6-K>M?k**f>D+-Cnrg#Y9*Y3%KLv;Ixo^uQH^W=JaBAX1hE9dqDZE>-2d z5E+DUFO}{SZVYT+>}{cxsDTYpmr8?klu$5rnbpjsbd3sk2Rc#4&k#S#YBYgh1HzRu zmDK4U9o3xl^&H3#i0@c>KJQZjEb5jK8H!KMt&sA3So7%Ib%dHBqJ^%WOb--aqU zX7G8yL5$ph1a_bdjxWo?Hf|gfbnu}?kTtct%UX)FrYN0%Bl<&Y^ylYKlQRI&nCe>` zkx>t+6OL4ZZf_3A(mE>dWqnpSG?)kurV?o7@A|5Ttqhq@%Kjd=m*zEl-V4d^WB`pA zmvUN!c)8}T-;1on`eVH5NOdWJaT*1|;{$dtrD9fE!^qre#kZL`{071UUoe9UDOw}t z@zKzokP_dx{#^fFgPNo`#wT=wQtr#Cz4r2sx2D?}e}tO06G5s^IW1829#{la>v8M= zT1cV$1C{r!#+wtNM{_0cLxW(4OTJ=YIc*R)B;ACK z4HzuQ0>HP20Ay3q@jQtWS?srw7sC*tv(`geUHW{J5PIU*;SbJkPM2I3F?~232AHoh zp$TTM`#~$2C*{IVQ*qW%(a(Z)K2gJb-5sM>i+SY1a$9MF=WM;U||yp$MbwaXn2rs_|gJLHnx z7G(giv5LC?4n)FS4iP~5M#rn;*rOJJLgF95mDPXI{|R~oo$SS_YI?hIQ1c9bxi^0THlKQa|L_^*LrV4VV@?J`E;jT0IP zY;piqp}j2nA;S|dmZ$}zP{{}}n`rbPGpsaAMJb zsu-XPZKXfjpIJ&oFtE_`&(P|cqpU$=RAK3|+JU(dMY8^nwL<5|{ev~t@AD`LIaZf| zJ)3}`kV>qnblvHsx!6);e>|7YZX64ao0!}2J{tWOG79P&E$FCn0|M0y<}ucM`%x^@ zn-;(WZ2YB%^Nr2UktT$J&v<_UdiwP&*S_I+SLqG1L4i9;xeP?jL=Qz=_v+5BueqVe z3+imjQdyK9N6+@7$LMtX5_y`B@GS!2G}JePE{J-+GZqqR3v9h4kHG6-(NI6S3*shp z8u4Qa+4jk0es~rU2tBs1!#sETRhsw=rC2BPe(%CmjvZ1Be2Q!~n<}@|vUq`c^@B=J z(B*HZF|r!I#7vV38bMyt6+vl~yeY&zD(?%&qb%t#QWELwBElrZezt|IIMNHD?ceWn z-c#j*e-F^GF7TWrQzT+ba@!d)?^_95Ld#;$LNl?W&vf~y`kY2t3zhd=xAH4$>k$`` zhZ7SfX*(+-+r=D)NUR*aK;(Bnv?uv_0eH%}&1&2nO;jCuY9!z?za&Yk1$}v<w1HmGJk zi8-y|eNjX|8N(^K8cBDzK(!s=Xc4|N(T98vlyV+t%lsh%L>3IOtwj{876v-D^G2cg zaZbr#66$*U?V#%*4@?cYyeY{JRkfUYG^##V)aW>IQJQ@*#R@-g8snz3s2;}6hZ)*U z;@QA`qL1SUAnJl0d<@{)M3L;c|G$!c*wyd975@J#AMn4q1qA@={Fgn*Oa=_|zw7~0 z7|NsYUupqm2M6e`{~o=9l+ilT-PgzlL?)$k66cPE-Ur4}JZ+ z`wLpk32Rabyua96g!t{bK@zV~XauET?>+T3=Y3a4LQLFp7SB~$avMAKLA6lZ@}B=Y zSZbg8RakFkzxub>^=P#!@nY~L*93qkjrRWB)mzTiPDIIuBbIA{5nnq1Tj8*klv@-B;Zs&~kJUI^>$KIF11$GFhc;F3S>x_t zWMtIZ7oHN9K<@X}*w{Fxovhtzvb0lL4Ee)ys{Epus`-NBGi+^lx@zd~=*SZ2ig<}$ z@$&L=0QGudH}VOPzfSCh(n1V;mLQcvFikFI=A8A}@dOuZRCrcgqQIG=TUK{*YojOq z2YNNH;{jO#n|aBal7|rV9{+K6L}A=tezSp%D(oxBdX+=PhKTAC!6m( z|In%_*O9pPOgs{Id9<)cIh%jKB1kskY_@y5DvnYLhX+)hFNWm$GN2__xhQ|E0=B*8ir_9{O9$^JK)!8sIDn@M*=TTX|d9Tgq0t6KYJR8KOk> zfiXQL!QWw;mHl29-8z?!<|98>&O8B)mCKuZe0>U3t22Aw086VUERHgnPr;)R9S_2z zc>KqIyacBHyM@N{SlRs^w@6wJ=fFNzhc(DPi&2z&JUXE;j(q0 zUHJ!~bE-^*oOd`SRen7FlX<@hErc;9|G009xL>7zu0P9AmAwhs 
z5ZEtUA!T!s1P#(o2fwE_U>5f@fLIZD80?boWEdZ!+SpCV) z)EFkLN$8+|QLVOwywvB}1{eOr5b+^^1YGl@yMlZLUyVl!$$DEP`v*_(v{M-Xp!#6T z6Q~_d=luyM7hA(_gfNP3PN(|Bdco@e4d0YJ% z2zj1R18^Dm$p#UDa?!`3>|0nOm=Q9y&*A~FJ~tN-zAxiZs36@CX6n7~vQad!H}v5> zX%cf{W8fASoxgvs4_}heSH1Vg!WZ?7lrCK!e~6BCezi#RzCd$hFVI;P;EHwDT+gTI z>FKqBNok!h2fn6u)TRc$+wqsf7jkT(kq+v_A|_)R&kdS0}dSs9goGf`j+_7pdUFki&iO z%YdSh&kD~whh)!}++3Ui@akDo2iOA)gC1r}T@KL>*>4fE(`sRC;)dMZ)5K(HU`)iF z$u!~Es!J-hGES7_AQNXxB~XpG6@JFS;45f9fy}a=%3A6>ew8tW&d)VAA3L@!-b za>X~-ExZm^owRMIUR%_GYr7wEzfe@{2 zRH+FawQG0fH_@u}Z~fb;~= zSx~g>`3CLXxopiwcG7nBRzb-l8kdHl`AMn$B`@r%mLc32TyXV$PcU;0Er2J0dG+|I zGcf#S+twT8*qD?j1pfU%FOro6ct~pytc_oSMn_e|8&W0y1=|oN?RA zhm{ZHkS~0{Cu#Q(;fQ2-OQSn}A$yCE=udLS2M+n0tDz~AHonP_^E-D#t!>Hm;s{S4-E7AGUuq~)S8D6R5M=g+b`FvY1h z_)!!&Pm{D9UEO>PnJ$IzT1?3oNf>vA2+Q9QFzk-0Xcyn=Y` z+cdM|FW^QAHZc$}i|z`Ee?c~aDq-5XE;a*ql=M)~+!&@*8>w`9s#vgMlMn<1A$_PE zeT7uRN!54h8DM@gYmi}Gv3%8Rj#msL5oI`&@^*Te0CVj7ZCGh?InNLqd-YT-6~8e? zcrWFnPQy?XST&V|>p9lf7q!kveK&NX##jPWQ;N+3J-(Zumem>2es9YRc_(H!f&w0efVe~7(B@OHv3%n5fy@k5sh%x z5`_$}l%GVs3n-)aT9B3z;>bu3Xo|<$fUZM=o`~*}bCNuaKK4s*6X=o{ojG_#ry`K#DWKW$}9uGc`lGomh)tDxo!8 zba>qxe}N!M3JH^8TcM`swG-`=Ox&3T*=OiT>?S}gC^~%4nvvdiE<~RM&fWvCBNx`! zJLmg{VlVOvv5cskRxMV#-^jS&yeZSJe}SzL)&lmPKufN9K~>vVLF-+;I2Y|eRy-G< z{^1-&S~6ft8@P4W>&lX>TPEVkif3Cf;Q($UP9Mi*CSqG^8*vS#4&W`2O=P5Ta4D#} zz1*UCFNdn$wsaHn zgqhpp`P0~VqdsvQy-JQE4^#^lio@$8!_uY0Z*tuD6KS&SdfzYsO10FSkP7G?2Ph5{ zs;7$^AQ34QjQ|;d@n(6>+D(fe?xtNW?hOK9QZejmNpqJ(nHUmjvNE$D=}PN@W2ckc zT@3mKifNc5ghWA%T&-r=$JC2-eI*`cbkzQ`sr?s;{f`enb*X`TLOoKwyS;fEi5DtF z3QV%E--jdqHlc@yaCZWeEcc+A3z#x2Z2AQHWPY@INhoQQB1j=c#Q9W734;ZLqqfKG zczt0F3mjWX3Lhx_I5dqL1%3x-Kj206J514RYD<$b~$TNS!x0>JyRyX*txGGln$S%;%*g4f6ya1ckC{uNhRu1gip@hfN=e%IFP4jV0+b>pnY#N`dpINdT=`UU!08O z5R8Q&;r98HHs0BHDABy&{cb~09HS?k7LxLURvD*{mnr_>u<42bf~UuRK*KV-Al-H% z9O+5b?wZwE2FXdZx2RM-4Row+!@_^E>@SB0w6YMQh{=Tugc{QZ16I)0%ad0j(Oxbx z@pAt5ekODu86^n!I5a;S2nVgLdVa#{OJG5>;~1M)i!`7~jj(r-SeW@8b(N!2yKmK8 z23DRROzsotCG@78r`k;lG5TNLtsDZiASo0I_ys1FUtj%md%~6U-0|lOkm}rBg8#uV zs5`v>w2b4(;m;^Y;*|#GDS{&-B^5e|}#cZz1KF zN0x=DiHe?TAr-YZwr4bza literal 0 HcmV?d00001 diff --git a/papers/atharva_rasane/00_myst_template/Distribution_of_highest_ratio.png b/papers/atharva_rasane/00_myst_template/Distribution_of_highest_ratio.png new file mode 100644 index 0000000000000000000000000000000000000000..79daa9ba2eb6ff26f4774aea3b73dd2b2bfe52d5 GIT binary patch literal 103205 zcmeEuiC2=_|1O<6VOi+OV>X~>lPArz!2}0LZMM`(vvMG_vT_z@0WAkkS<>V@pp{vg zkd@+WX-a8|Igg;CprRt8B7%B1eb4!=yVm`#`w!f^*J_CdFMIEI@6YG?JkPV=`_`5x z*00^N76O5+KXvlxc?e|H00gq~y4-5;7r`RTCQ^XH}SU z<07_6V4!PbDw6wSb7JuPBGcgZg-Gz#vbOV#@3`iFUWTvIh+O%fmm3}`$~q3(Nj)>f ze_n0-A_qn$YvHSQ{FgVre|7eceAWsGq%fzb=#p3EVj(k_tBN0x$24{3ySRwPFSTS_ zb(Hcea>vKr@pB)^>(TXfb%(H=Gv(8D+J>A&mqRyyY#JUOu2IW(X-G4&Yk7V4g1>)3 zqp|SJ@X*lL$#6w| z#=%B}IbJen3qCN;^kCx&!UhA|3m0B0St3l!Lq!+CXUCmz=l%WtLzWjsvqR6U?liwT z)$Kv+v$3{TELDL(+)vf%?zmRD5)z)~5FA{*ai7DR4osM%kkcSvFBJ<5ZvEV@<5*(D zYtNIvYjOVkc{#hM&(IEsDzDY7J)34|+u?wH1Vc-wJYWM-*3y9A8j?Ttd3r9aZ-+JU3a%}dbqx= zwe`=~FBWdPmC_GOLh|xRR)W)7=Nq1$p5y71>=!S#uSy4Xqr1!Jy4?)N-&a=^8X86lXFuL)%kh??!g_q&2_%$c{G)nV7@O~0 zsUlUI(jWCGoGtlehs9!p_Cfgs)= z>W&#OgI=OU#f;H+9~+yI+|-xCMk6Z~r=ZM=#R)GS7rk8USu_x>sFsJRMBy^M8k{c> z2o(#nJQcHN9>b7&%$Rz}NT#(Zd)weM?`{28v){ZK*7kRw))O<2ym00!7@W@=LiZKr-UtQM8;gC#N9{0pe>wNU z>s=piBpqye>hwn!I$xM28`Qz@$hXi%RNcTp{m{1~$!AM_J zNQZTKy;#O#zljvwS`4i(z+R5CW$RWK()@K?8hTkObVfTqDMDg8V3muE-|UW`ca(#C zrpxwH#@6YCn_+jCzgrPD+@mdv$;&^#Ym(trFD3j+bmaDmzD!5nHqUR5H8d^-FN|jU zPkn*XJw4DwZ%%+h z1a)^eIak;o5i`^a&$DMWQ^OYB7DrB?o5Af7PhD{gohTS6!CK5@4=6T%akJM*%{A&< 
zyYyAbD$?zR6&aNu3V|H4mj#ZIXa~n3L7AoD=&PyWKFkXSn2@F`bCt`I%5MJ-M|0(e z5j*Wdkj4kY#DP&A8Kx1a=Ja+aPi$^p-ujp@FKUo>uBb-iMdeK3(X1p<^_N#Y0jf4% z8E4&425O-{;v5{y&A!n``djnLop6(?50K#&Hc`n`D~#((D6Dsn_5iN0beF$9Yo!lL z1p7)do<5o8TeoLmQBpv~pG8Z$7>BNY4!5=)$^Qer9>Y}=HFSvMbH=0m+&ZxddO7=iRURWtT%Tp%{4uE_dBkjNSX>e-&-=0U%3SCT-0Gpk`-6$0BI|S zE|#y%bu*6cG!gYGwflq*(~YOU?a0)kGx&=$6~PV&mcg~Je=EXD)_#-l#y-7_HPl)K z_bL-atA&o|>m5(YUFbz|xwlj|8Bcv)IZzBgEbg=y?(YNHVIWm}N%c(GY`cBt+Qv3h znC3-_!)7xD(ns#1|JE#eN9fqw>?$GuL;b#i`G&iWxTh;w{2K0eD$)M%VrN73S(-}7 zvX-`XH4^Ke$jB!%7!xkOa?MP=W^OP@E}6+cxlPAr!s6=&yy8zW=}JgbV`MP?}%-h8mO*5jBh|3^|j?RWtvyJ zu$^Cvs&$empC6E+d=k}pc|vlfGM>=KRytU8kCB%w!16m9=%b==5?+WMd7xhzjk01E zVse9Wvl+p>wI-FW#+DgWyjKr9OI#wHo#^<^ZR4-Npi$XNqi$cQZyC?X^teocuzMqE znIj^y5>BY&{Wg&rT=K;WaN{8FAYJi-P$KMfAw4oVJy6bZYnLcvaFb;w7$pLU&QhtE zTvQY7mlRA97EDA{^16{7te!cy!3lABKV97Yh^zpOLEYrXU^K}}=mdEj3Rzebq=ZSB0*w=;I;01*nqW7?^(5*FpURJ~3hkbQcV! zdet4Z-`qGdR8XU0&U@7%szBWG@$P2_{LNu|_bV!pqz=8wcbV!zPYr?vRR z>ODj~*#55U^HWHyffTxj zP=DC1zT-t$NEp6iamfcVES&s)w#-Tm&5#WH;~gdX(**+^->7-yo`Dx$LKK~~615te zBI5ABx0jtIdIQi>3Ip58o6sQX#|~|@Bi^mpXtLEkRn()DfKU|rb1U(E!Du%B95e~F z593NehnkSF(63JPOd5l|B;l*ytJFFBqhGIiU>GmzQ=uFrz9qnP2^hq>*^oSQG*}Mu z-RD+7KA%9WOVV>TliMLZ*1N*m-eDKpe`%hMj^6gggMwLJ=%|J)(bdCnau{h%PuxM5 z+J`^8T*<0c^2?KVjEQHQ917T`%mxjTYpkClJG@FGmc|VqWY>mci@4jWF!QQo!Q)1x zKrZCRL0m}=CGY-ks916+89(=PG@GpUTgu{GkClppw`u3Hg>LyF>BNH!9iQ3f<|LMs zW1!LN)0RnNuQ_1+EO+Gws|N$tb=-1&s98ndiU$lTbTCkkBb>Ou9v2HAaeuTDQ5hSy zEMb{n8t(XN`i_w+>KTYN*M({l&Z<5Q`k`BSoHFPe6;JMjvjm zPK>CBiVdAlo_d&zIswas4;96AbDIW|MSZ4L_DUNNi3|L&<*jBHXg-8tZOl)n&RiAC;y$KQAk zoj}E7uuKE88=q#5u2eA2zsGn>cQ&4Fy?~bc4IN8viNV;BUF|+1rM_=esAt?7o|z&} z3PXDs?C<(z>ae|d6y8boQZn@{ttiA8g@K~f_%G>?XIAa2aOxQpC26k!U~PITL>$yN zHtE*DZDX)Ank6eCM*_{|pb!YED$bgM@;w%-jgkIdsUz5UjA)piDS)EqP3M2(n@UZ* zrM7mC%2jZ?QAE&Gg0J1G;^|steb`u8(fMz8!;zP~+v2JXthNXmya)$>$c^6FWB0-1 zQN$W38fHpZXdy>GuKsd2W|WQ}iz+b@sNFOfsoKBVJqVkXE2i zSg)#v)bsy5W?jQ=zndIk3f;vCaJH*JuG@kp;DXAfrBo<19CfkM6V*t{EfW?D+%bge zd8YN^1ha!$m2=+V>>PdK8yEw(1b0Agn)$j47LQx}G5=51WJ^mR#|B!`1sP8LkPFvFN!pG0fj@&s+zCc!BO)g3i6}74(VnY#fF0_^I-E|`&%Iz<;7c`FOspbKJA!c#R zZZ=C7zo$%>AZ4o)Gn(?~lt$rxgx{?Gq<4#CRP$t*rT!OSSuZdV`*Nyno_tJ2G zEvA1liWwG~qSm2}nu}Lio|KdYZOzlpEu*YlOI!o{y*xI8Ov>$zz&dcT(uE*Y-Q_{K$J)I~J40~?iOAB)Eu&HH+Dv20#5iA3~4oyY^>HPDGpO9xkQc(51IgIiaUL$2 zXG@0J(}qk8-vx*62~jY7I1`Mu&7@L;cBPRSwy!ECjpNfgu(p3eAbsmK;1I~IXWGru zs8F{r;pS_?iUVvD>5aajLzt!M1ii^Pdo$I#0_BIFY8ayvB|4MrZYu-s=g(ha)m0Sa z;k%2b8`){dLq68;UlxC04EOylVU0_CaYKR0Ah%}V zXOR!vEb&in56%*wj@w83+VPAU(=G~KhS!M=eQcAeHHakG6Nhu$wN@lF_w<+f!PACz zkHiIj^P|Es4@2p6KA6cnNZN;xUK8S3R6@^!TIZHZC&Ad)`AeRt)#M$^lkdCeZOP_` z?KUt&czFxe>Pw1RgJng745g^~PSww1uI4b?PdST`wi1DHA@Gw#T^(hNCyaz@C(Oql zVHpI{)b&Tz3P?D~yNy)&NmBo*I=1lMdTdcXC=SA!U?Xc*8o$i0j#-v+(ayNPXvH4& z2_9k2q5!t-rJgG1$cTM*)9#HL;j|9Js|!UFUew2*xy{OFn8YAko7Awts{{DCkQC@u zCsy0hlo-40;8IvbVeQ?VmFJ*VFz>@?PZkHOhVgQ1BTWo13I&FL`gRtI2e0P)Zz|SD z=~nEmGCeZ=Rmp~9~;$@~>^AfK%63R72IE6%nz#;d`Phx`Vm{Xuhb(C6X< z3$kcDkB~u_9b&=XFD}{Mwy&GlRIhvoHc?1N>Z_OL&h)BD`uGF`8|9MW7QT690HxN= zB=#kBJ?*U}DDq_^tu6}lR0_B{a!MO+7{bCy$v4&69Tdh1d%kJqYGxP_udV`L33>nV z7qy~3@L-eG>i#2D&Jin<3+Lr&+P=oON^>LOa)`&?)}Vr7-wxh;z$>>Lo-;3x<9)tv zk2u+RK8SO*=89JbV>l~smS$HVOHC`E=y>Q>Fa)z#gi9U-`&9K7<|1}v$e+#4)Msr} zm@Z2ve>`}ftioT8Z_E_2EbO`0+_>)x+IhvtIzNUjlrV7u1i^EK|CBpys5gLDcYp*| z=8rEatd(k88`z45+j`W>!eGS?c<$P|Q_prs)tQmLj6i;}^NJtnHrf)>hi7ZIdadI& z@h4cOdlBBo5f2n)N(v~DDSI5y+(9y{AFLYZ62|p^Cx7rSYWb_DzR!|_m ziku{k#i(WWyhVZe3YuC;Lxb`WO0yur$E*I}^(!wbgCrIeJsS@pFLuH z1mQjw9zhg6?Biy`GgC`pPhGrg5^}rXmL^Kp9N};!8>@PP_m3KLUi2Ktr`mUuJ^HM?=zJYa52{_KJRA3@_*$c~4+_&GF$Q}m>DL}o5PXGD 
zVAnGeq~@yDQj&8gHA$D6oAZ4LI$-gy!B`Kg#yBjV%71Vdks8_hF+r@3sgmtN-h zGP>=rt*a!o1zAGzxc|-W({bl{`+Qw6#W=e)yxlw4JI%@fE|o&UBN+R<9{*85Dc^Nb zhs=;0>Ul7BP31z_PW)v^UygdxAE83JpGltP_p2ZL%0!g3F;7Id0Ty4YYHL)&DH(gV zvEoW;=O&#VNk+LG2zuUKKG3C3@qM1Z~hX-y{F%_@GJSs$2sMtC39VV zY3QK*^JSRSGX$g9ou#qa$`u4P$x(hYHkX?}3pl9hB{Pdi-$nKUz{wd?_jE()Q<1qg z_5KN;!Z;z!86^XdTt!6JJ)EUC+M} z=`q;)Sz%uobpNB4T;W6i-f%sp1e9QS*Vl`Qmk{B7Xa%~9!>2_xTev>=0LQnb2Y$Oo z0#VKg@lc&FLQKI;YtLCJX;Sx|<_}hh#c!g>_c{b$${M{ztcZw%0H4z;RHFNx8Ja0n z3OsT(jDCVmumYvY9yi#F_N>3-=;#M1J~L%V@oW#O({XPo!#J1k^C*G!r9cht{FnPr z(_9s0s_w&Jgqv+_`8&7$i@G|xTe|gXuSh$*`b&s0|C0ZV4&8L(e0=musfbk05*x_F zio15+uQ!|!-n!R@#Rz~PUEWSRfB*2W#13ZZu@|CSsMTUr7+gJtOh9!$6iVEABgD=E z(%<>?K6%!!f3Ib)8O;(MQC_5X%K1=bs4WMsMNmxG_jiOsozz-FVmITPu1uUJvJlyc z+>IqMt&ux06@~Lg)e#NftJ=)|%-S_Ft8kV}0c@wcSKEeD6FulpRdr5pbB4roRO{K2C` zi+cF6i?)%Z9_9%b7NtC1`z>g!z;?MWRLjd+^9+Tl_Lpjv|-^1#AoKLDe1 zy`@gExHH4YJxfSBLVM@aW$YPKI@xXi;Ku3nqu3d;R6lJsZswnX= zNU9?eEy>k4lRh>!Y6b-dXZoPDp^5wc(OiiRQB7Lao6LYEUCUBNfUg`KMTwNQlN#3e zsvah-ju44%bx}z&1)E))~Qrm=%&eu#nWQqbHp*fh)J`T@zFmrr3 zGtz#8?HIHZ7UnZepP`|lvA{Rkzdyf7T3k?_7DB2|GdXV9NlkXFs(@0y2dIZ0Y*0Tf zY|9>m{sF*wF4Jmb0dmOEl{h-stcY&^p5f?gCr7@8sM{pkQfVQJ#oSQA@{bFA2aC~1 zkSHMBGAEb6iwU69;HV2enx)rj2Cx+X8L2%%6Db(IX@4sE(buFajgS2hWd7RULq-9> z^^?cFD%xq09`YrA)ZEwH&}lRf#v#+d^#G!^J1KV+0g3uuB;-6*7u<2U0hn%A6@4c? zCcVN0S2lok38;mxU~<0$)%ZX?A}0q2&xpCr8+)LnTEDl*_7YNE+j(W>)2gE7qYV3y z%#SHFNNZ-dKeW=dhik)HqKmrmrS3`oG2pGJq)Xc-01g!WF~BhHrJFm4M`W!d5rCKk z15E*Mw=;4jFux5zEd92Qfz5eH?ZYD(0{)Pm*Z(=IC+6o1dAMtAiu8zSW}2P^>5j+y zfYpl$ZQ7se$f7jvue8yE4NeLVg*l04|ly7Tq0RTTjbBAXtU8m*KIx78YsfO#ReEOB-4= z3eQm_#&h}wDcvVP!pWTH(^@N2Un4M+Zm1=W-lSgMCS*_}jaU2Q3oAW}s!h82?AteQ zuBQ9b0MP&?*CkcPJI24y&#c<7hI98Tlh56`a|cIYE6I>LgpAEcmJ3gj@pEk#O8lwHsKj!bSjzLdjVOa_W@LJLa1CLJ7U=zB_fw93AWH z>$CpKE?!QAON}Mpd68O-_Yj8!b~7B@H-GPIiNlrX{fQaOj=@E2E7xhsX~>l3F~Dnd z(Lit)^H$s*2qG+zteo^m{URy0wbTy&WQxR;i=<5BqQ)vzScs%?tM~q-Vn+)QB5ZV< ztHH}=(!qwP?{5{TS?;KWf$i@~655ffjG0yOMQ{TzUF$!R%th)Sp$zS~B~?_Q!#zMk zt`r7ks%O0%r8Wl&{;cPynk(eW2x>S0gp3Ot7lEl=QCw^X2*&&9YSv}lM(mFbM7d2m zN#2YET309_)4hsf)4e!8@t9%Lqd25ITRL?at2jhCnmv-Y`uw7XYug(E{*-f>>{FWY zCIZ1cK1xH8H7Ae}3q;wu{}CA&Xq9Hs1%2qs7wwb3Jife4v$P4yeDUEPn9($-6Wl1% z@6S}**5YMx7V|0T_Bp#MkHKulse7JiuF%yNN#d~jLG5J#ArvmJons^y-l(y0NLif! 
zD6u5qrD*OD*SVn@^uZNV2ZXG6LaplJ*5y{lKlIeTzA7U~9;7d#?M#SnfbVeEk z4t#%V8zt5D_xF#i=4M**UI8{`ER&j^?neA$7GO@(a-nI|z{`Q=Ve#Gk^-bK3AE{Iq zJEV}7?$XKr79-3#WlIaL#!HKPI1Qb#$$^^FkzYXd!Q@gXwQc-^m&ulSM@mt)m%8(* z8Y)kspKi~krd!EHfw{C@-n?Z0W6#gB@A9g z8ZmkuoTI`(kZ$weMVx{I0e}w58t8}Nz>gLe+;dd|7tE&;khEoDPya)-PKzv}k?yLb6 zW3ByBx{-xk2?Ttdfe_mQ0@244%+Uzw6wUWqdO9|L3oxm3lSk~McE5}J3RIXpPhnt} zNT6y=xrU`++zkf(YWXiUu_Qj=7?3!#9;gABGk{`hhD#ho{XApTb8T8lj=0VQFU_ z)53`sRS6M*g@aK54}7kD$WgE`ke?!El#UA=dXU`8TQ=!7kSFkouUEx$v*QJ`an&`6 z2j=nQNWjn$f&U&fNN(px%H`$De=+e)ma`MwAUHJP4hW6|>7 zD*DmBKii0E^$AjWslX4bH|M}4p{u?YDeHU5M-{_E?%5Rij8?WF=ns4)G0+6C7Ps8% zynqhg1&1FFMn?5zwl#bwvHO7<13F1siAfwRnI|3*BkH->J^Z7m)uP!S0&%Q4Kykc4 z{|uGq{u3yE3~ON(xG{T<#yMx9!auIw$S}5XAFT6I4l0|kURm)KS-*8_PL(kr2>Q}U z3@Orjkxx&Ii_x3oEqsM$YjpHI6($0ymcD`N?b0$~jii8HxYTKBM!Q}^J{09g`Dd|| zq|Q6!50WR=r^BSSpU-m7pyoJhHdF&*6Zb3E0;zc)lhFdTtIyX0{T-t~iEj1%{keFt+Ev*V*!bj3Iyx|}cNb(2wf%m$ zHmNQn1Nx5)01D{-<*3IJ-DNf(x~)pgbBclmW{v@qfuJyA7smo5j#<^i1D5{dd_brF zm0|q_dRBl98wIG?df(6bOcikP0ZK)aAT3I?k^|5`(1uUtNp1s;6$r{ADN zo5lI``gX%zfgY%|kDjvpM>ziZ2ko<|PWKsryv=`}F6ut|=XHM${{Qb$`{YryVbM*T UZX2TR1n_f1&s4Wu+v(AN0imur2><{9 literal 0 HcmV?d00001 diff --git a/papers/atharva_rasane/00_myst_template/Distribution_of_t-statistics.png b/papers/atharva_rasane/00_myst_template/Distribution_of_t-statistics.png new file mode 100644 index 0000000000000000000000000000000000000000..7b464e9ad906d9530d93f618fc95e8e4e8fd4592 GIT binary patch literal 71375 zcmeFZcTiJZ`!~3ZfujML@wvr3q3M5iqn6dH_W!fdHD&I~p5;1wpBy2NF6Y z0b)QwQBitNfPhFRfV9y0R($mReP_=5$C>lTnK?7xo^eQIvv;!Az3%(Euj^X-v9X~p zA4CMg#l^*^ckPNP7uR1*F0SqNyLN#88TSjE1V6T+O?9=o3K|cOgExOUYeBWRxQd_g zuHWDR?{|A$vqE!m9XQGPw@uIV#4H!rR+iosEi*s6slJ^-hleX>emYYL)Tle}6%SsY zx!)i7@Od-$Htze+xh`tTWyU;D61-_0c0Wc_TaP=OEBoI2u**BQ?|2%;I}jcC0#5nX zzfny+UU*y2uE+Z8%MkmYxWM)5runMy)dvsCwt)`<4j12W%++bEBnW*Z$jyC!RNc-q^#%Rb-NH zThS_c{f)y6jh!MEywF1aDo6d%>23?lzFth}5BM7KJ~bKBs@F;N^|T=&V}n=6@k`$> zGgd@&Un4_Mv|PCsy-Q>kL#aujJxWkT&kcIv&uq7WCrW*RxdIB3sF& z`3U<07zX?Mo?KiGH0jg?Ro?l~>#s9{5G&=w`bQwz3D-FE~H45_bpwrSo}4ZyVs34+edwIeG2im8)ztuEPEL_y^u@Zyi=#;-?jvf)OPv}@hQGcZ z?RQ0-Ie;F_jIhN9G?*+?Fyp4a&n)sR2UCu!QCeKu)ifYnLO!vvu?ilAd#^Lb{0s8( zFf)Z`H*n0#QQiY*imsjb9z81YmEEX1(;cPMKQ_HWWB0qQ%~XeaKyNJAc64@DR`aPD zsXQovR{b3M#PIGyjD~F1N^Cj=vPiGj4smgnouwOY<9ZhW`{yn*nB60dPrB)h3nue6 z$(0XqQUKLPuE$?4m{!0(SwO@Zq+(Wsjke#l3`)o4+rMxH|Gxcb_A(sZcJ_}b~R^`_9x zp@JLe!K@|XsNL7q!(d?vKBxyvsi+^!^ZvC@8k>VM7{RwF+G1xx{ngkjtDiQ1e2GD| zX6ni9G9HeuDgXQUReAG)v0xriYr&vgZi4JWXkCuN!l=eph3W{qimfL6y)0JdK7l~! zZQ58}L~rY@BLuY-FNz6Th76y!zq75iKX4}5CNXQ`pJW^%OYoidNa~fr*eW+DG>U2u zX%?(m?RZI$`-*qI)?^hTl{=aC&vjHxKtX%KgausY1H6Nt_E5aiJS$6Tta3QcW(us+ zI}CB3?i+IJaPLHJ%ApHR-5(zB@1rq&nZbR|n8SU-JU5ve)4mANu?NkigusxM;n$2f z8;^1B&)fNxk5Cj17-K1u@NKqj1d?~lBK*zTr2Q5hUWN!5&m#BvForvw##LnY2=N4? 
z)If@r+7ruJs`sRS7<*yBf};MT1J@&jt&JOnA~iODK0`fITk;yR*qE@Wz6;kyKHiU& zCHRP(zx6qQ6!~N+%lxV=K?=!>wOW13eHm+Qt>`nH$bBXGp0+af=qBk4-uOyds`Yu* zKCDO$n`#-<<>`tg z;@$$%XnpAP?tbv)#AQI%eHSo)nkjFWKsSP;GT$A+MI4BN`LO$Z8DJHSMDW2yrLQAh zYj`$rr$h{*G~}iRIZzpJ|Fa?Sx@G<>au)7B7sQkO7J>bJ;$O{Z{)LzYWWNH>E-7`9CiqZ zy9Z8J;CWr=KDh)AVufv~%+D{5BV1k?!xCJ0&ph6J^#h;sH+W znjSVmBsSV;&%VkwZOTm9-?y_GVubt2&=qgi#x~}I&T(6ByUNkZP8B6F3-;xvSh|}x&YOP|jczy5tzelYbIuJY zz-$t%JJ?;!2yJb|{$PPVi#`h3oWRZ1jKGGZZOy2G61$k{06iL}y_e@Nq%=y!C(*tq zGwiFxsKL?j2rjG+a;xnYH{@{Hn8XIeLOgiEMA-1{=IW$}qAAaTwrnUmEr~EJ7?3Mw z{Qi+-ka14h0Tmuix?$RbM%@m55r%=RA4I2pDQ5yNpt0VJJqj6E@Q<3$DjIxrJladP z2=W$lJ?bPLCCFX1GLl$;)Ia)Cy)aG_MV$GuQ#s+1_F!~P>OBGVZrUi!3y*Gb7P1>d zL)-!a9r|Pk-o18Y; z?M;;)A;U>kxRi5hN|hQRT*Sakrcl!O#a7=gp8&;$jm3*JnYX6Tvs2;Q;!||hn!v)c zcg)EOEC|dCESStfY9NmDbV6=>qZSU21x|LWM5h>twQW(Gi3Rwo*994kmqTrkSsmvV zg2q){Y}5kjxEe1tfdD%}+sW>9XU*{P>D)S4`-Q@pw8?CpuIpOGn{{ACSN>hK4NSB7 zwPncwqpj2~jS;v|*0~(PGty_8HReb;BD#Z#HM@C~HC9`m**2%?=j}JT)Pmg(`A1M$ zI>;3JMw<(MM0%HrW#*lpM&+SBs$;45Y(?|9cVJxzL9jPz1DbHPo#K9}#D$iFmLh0_ zqmQcYaSMwPU6-@Hu8b09@J-T5rVeR`q;)>co&G2oA$_Dp5n8JRugaafE^ingT{v)c zTUJB@i^A+Du_ml1RHeF2H1W@`e=@};$b4AX=%~t_ncv!EZlTjs3-FE5F1@rjHCK65 z2$Ha@mlBfW)>7A7*s#=e{nH2m9_h+y!Z#R?<55<0r3OS+NL=P%t3T1&=xEGRKDv3s zq&vfn;F$B>gl)q%WiA4&hw0L0}4UDi!6!14|jj9x*-R~oL2rlL*X zWFhrXfpcF@Q70}VF2G~P)hFZOv$_9VXG7VDSnmMIg^So2>U1ShHD#JqK-}cV$OU9$ z#1^Jj*|uGFO;4tY(-rnB*+te&8x{jn2$sTZU9~8}2J2486=X%{&t!%{W7CoOYJ>Op z_nyPh%-&~@>a(wvNnkw#3e0HuY7Moot5nlvyt6?QzJNAeSfiwt2@vzg1ed-kSbS@5W=J zSPO`M`&B~tw2+IGnWaToYLTHB{kdZFa63SHLa*+ zi=R;5w$@-GH9tWttULl`$lPyZR}Mt*PGu*0-0d=8P5Y+H}g;LC7{CWobdvWx-v#*=$!D}&=qOpbpV^DWgy;a z8|Qw6EZ0g@?iN^}VG0HBkE=-ewL32cO{}yvZK`!Cw5lg&RExpOTqP{-!Svu&Pha4CGS^a9LsOycr3Lsui0lnTAuJUF0J33_N_+8C`mm6 z&zaRJ@{a&l-%)>sUo#% zb>kVg3eP_JL&CjI__?fkldP1Q9dKUh6Q+AJ-d!7f(`W=Yn!LQ$`BSK7g{h^n#?;zo z6(VG=&VwWnT1jRdFKey_pH0STAB)n-Zq`9nBE!udjkN_ttz_v4xvoAXum(ATmvKh~ zvewLP6x7oxZrKJW!h~j@#g}Rg>-AAfW^Jhb%*xRuyNyx(LWqSo3Vu6#_S@F<3j6Ii zn$b#{ZPJPjfP7h?WUgiDblKfV36))(a;aXekZp=3ObS*_C_|VFN*TeE2RCUQzU?3z zY2M&8Ztt$9&FZ4OR&|37wq}KS+>tFX?`Q%+UOY+CwRss%ST&J)DRDwbKRjF}T2PUM zZG;cLj1Wqu8fhhI3nfR=uPZ5%-pg3Fu}vr4Lt=4KmavXEvRgn11SY60ef?*3jKq=g zRbp$QD~~05eM82fwRnAvV1bZGzE?L33(RSBbTmGgJKtnKx7{9l4f4wD%b0~0fd$)d zHg*^nykxc3hTHsMeT{Lq5eYRvIRFxr2k5purvQle-S6$vs|ZIBqRd#iqS{TV5e{m# zA{NF%(a1HpTbXsH5@svC8SYp+zA2q#)(xPe;+{cKfU#6~Bp{lC{CPy=)$r%^;r0WB zBCT*n_2%MPCDk9CUZ+W8>#gWee*U(kXrg}RI#<4Nh>i(N#RC4f^uSNjfiv~1e?4q4 z3CtQ6@mQT@!L;v68)tVG!*Af(q-4&Q zJ(BZ9Wlk7-lW=Likk)7c4?$*`Q48oYLLZVtR>$Q&w_nKiL0}WH#yW|TJ6bLfoZ*gd zX$!3e9ud1kgVg#?^Slt%4i!K$I+zbYA~uOR>vEo-HsJ@gdCJgV>sXoSUx@|WfIM_AiDA?v30LaPp3-q7>CURlvVkDL?}nlG=8{EUFd{7 zUk)Uz`GdrB4O~AgHAZ4=+|oLMVaJf^Mn2;*{M3AB1MOC6L@Q*kv*uPtyI*QI8Fj%J zH^L8n-xL1DDYBPO8kJPW|n^LJL;tO?5&lQ6Yt>i+C(>#w3^*@gG-`ZeP zzwB-A%nQz$gEke`oiR(rj}|sh()T65meB?oiAfE%_Z!4|JXHqJWWtz+k89!Ey@FB0 zRAXio7lJ_W9{6`E`O3FCZ9FSNyNOw5vZ%@QA}3-#xddi-@nZpXT1DwdmCP%%@NDF< zsN*A^7E=zoj;RXCC$!(+uR90JdM@)aC`8B_EgQCJi3n(*sE-yd>A`>GCY!v!)uyii z4*;nTOlwz)ylaHEG09eu4~cQFre9}B8}}WQpPv%*?JcgGyC<>!4h~b~JDS+K*nR=e zYQIZBD%iWRua!1VP?3k;ez-PmWVYi7iv6ygZC0pMX2Im|?APWz6WCr4xn0Whhwt~ja8*{zns z=UO+ywEyPePFT=ny(FZz9ShM($ulPMtV-{?l>p56PaV@E zxHhHJ<+>6US;-FxUY{WZ;wQ7n*ZL&j&O)cT-1Xt^3F}8gTMl{#gkbnE_U)2%+nmgw ze+f+>5g`saP0|bSX1ctxQ1z-YV>xF3m@|)G!qbd#H8vX`SQvaDpot`~fqFz3z=Q|RnFDrZvrF7> zV4H=1X3ueyp0+T6a$@|kUQJ3dp0PR!bHos#1bftU))Jl=up+BDmi zJ)uJ44OwW>Ex3zU+mR|P1{+-fsP$76E9=Nl5IlETAqhV*pq*<#xla4?XvUl}@^NgZ ztxgc9-zwa=U&jr5^Ts|rE6jwctM#N{5D-o*?!;#ro83G5FYV+>elcLf%O`VzE(RlX3#uB56#|=^j;#wRT*nW#^wJJf+?1B>^;d*#` 
zU8=FcHq*`YS@WZ*W1HhWs~o}TYhR*>zbdNtrehd3^UQ#k1A*T&}#Sr*A@p0xSq3x$rZclT{swKDI8blyJ#8Nh@t_-0l^#V&`QZooeJ{_r)R&#-|h;M1$K z*+g_ZyVbXqB!jthiHqy`O6XeXix(X7ClzX@m6>X=CDo;|q;NG(;+^zgu)P_3^~2j* zbR}4IIi@bzvs!}4+?)DeO?1VEt;gHzFM9MU3qQRcfBqx9-{q|wc0WX4(E70dj~71g zw(Tqj9r+Wo$=>v#)0WLkgc{R-@C1p59l4rA&oA@e5z>ZA%bS#^hW+@k-w1<)F*2Q= zw>!gkXC5&UvV*)@pa}*@^(v2N#!P`R@LV?x`52?kj}RptpAeDCXJk32o@85HG`6 zxHLwU-V2}2bk@>1o!Yk1h9(AP2g^*gE;cL7&Qwgt)|gg9OmHppX@O~L`OQ#VS8#`S z$EaYzc#2yD5B3JVFapZcqEeM&B&N&MVI$Vm0b_x9mp`h%W@?#(Ze_PtmC;h9#V!l^ zL#D5Uh9;2AaV-HPt4UA&Pc7&y3zk9Xsr31u+r2+z5VBJtr>fsgf7>H&Wh?@+TLG4%eUkJ|rn?*vhlsry z1J{m5)x1vj?eC$5emnQ6+~FhMSP$35Zm&4q3;Ls<)q?3RIF;y-F3okwP0TdGk~2u{ z=Y+oH+`P`nNj9IEOfpnu$@?M}FS2wyoL4;{=9o(e18g!#DJ8tm1y2m`bcfVoP`(V5 z62q;K)@g}n!&#Y{df^$|ah+qT69GuoJ_+oF%AH6`N0m2IXveSx%~h4h8M0UC>bNR7 zmK)yZ%yU!4sE-#>t39?kRw?}yUY_Z!!?t6avo+Z;#}BTGW(eJu`JA;>bk`!>67QBD zroSyc#Q?mRG2Pj``q9m2G}!oMzfJh*n-uh zDoWLB>$i(a1(O+GMuT~wOiaT(E)dLYiRQ>wv6IMgFuityB~k}lHNMZU3`W!(g7fDl zTrFJDdX|%Z4QFAT7CJU5v9YQF6}-9ZXd!*S8T7!~lBf)?qZ;llK6JZCjUpNumXqp8 zKp3x?>`l{5mku@@JpysY^wXb3{2gpQDyb6P2p!Fs$=Fa*6aHBkxkMa7hHfg%KFb); zo>?ig558Ks={wtGvDQ3_3Bo?9w@Ov;ZM6!!30Y^e0sd8<*NC zIVGHPXug=;Li`q31b1@~4CgRD@jt)kz_wq9j`lJ~PXfpPe&R2&M({}d^~#e#m^iZc zGmZDxIbqYE=Pyw{+$?q~Jyt_eyq0B3c6iTiQ|^@p6oq}iuZeMd(qWdF_06R{$Hd&! zw8Y%*%5vw9udmNiG`3cne7?$3_P+i1?(cZKHrCjbof1LK-Odv(o z0?Igy;u4_yi+beu|GGI>-{oU3ihTS1KW%~rkx_nU$ejbiT;vmM5_P~GNFjtv9Dj-{PQ_U==UF+T)uoy?dOO6K7hjOUj8|R`ta!Q1N;xsZl$}J^wffa zMtGI=)X%+H@;C69lrgmRY6AmQ>1;4 z1ZX<%-}P5-NkK3E`8g)IN!Y)tR+u+zEZg|li))rLvB!cg<1+?i!62X5SL`A`F(*Om z5*)NqJ^rs0mg)`ZpO~CPt;`Ki!v}59X*@;JFx%{Jrw&9~&Gggq%gWlZcc6b)kuLm} z$*i*~D7cO#9BvXe1i zda0AnUtXSE+FYAqe3A%F3d9Eo0Tp1W)!=MJm*1t7Kw%d4_oqp{CJ9Zr1vvaSYV2Ng zoPO5bnRL}Z0_2z`h$S*mv@HA;J^L>uYJ^s;(WsJi7uu!7f3-0{A=rnf*0494%S7Y@>vfuZ|KFwM?dpJaJNa)65 zS0ZXV0ktp424hjDe(lkc?l?9->|m{=79iovVflS1r?ihXDAz*I8x`LI{)ng zi$E+WG0C{vLoBSC;4XjgUGBM*kmW(2n%$?m^-d+kolRqA?DSrnoT%8KvDeeMpP8Dr~t~DRL3(zy!NPXo4 zk2iX>kpz^XE`X2Kgv+tDH-sl-?W>YH+oFCiL$1{AQp=`AiZ^QhqOMI-S(TForEUck zb)-lAV1^3rLibDN$aV;P;SLX5oa$-g@qF)~*x52eaEiQgOfC#I%qY42bqWMunXb71 zhmhHO65U{|$K#mmBT61mD~zRXrHtkaO{Hyc7+KDN;fi8rceVnRxCNdo7C2Sx19~S0 zkQzLzS33&b3u#Mtx5bm)h*ypc0^Z1j&g3u!luNVkwl6hb7N{V%0Y#$$>m?=kpYch3 zqDXp75W;{cF1paDT+8E;5Oh=|s&W82XLEV6zdrUfsx!|jYI2b(X=FU0=r`+dT3Wie zDtOJh(2kY|;+f3wP*p0h-As2hGdHi)R}mEc6SjZnl3=WigmAN1kInRReS&XXX?3n` zipWFB=Nuo1u0b713Ry+hfjZK)paNEi(s6>dh0t5qk+#-WQwE4ucc5V{fmO@^t9Y2Z z3Z!(Kl3S0V$toTFLGw@|fl$v9`grAcA~3!J>C865P1Dsnmd4K-UHW;SXbE%4ek>#& zG^_P1isPMA?r0&G*Ig#x`BU};A3KS{4=MtAoeDyW1zLNQ?Ir+}bdI8E>51GQ zJdul^0wQWDG(rALwDR{|s>=iS^6jfbqV}K8Nj57@rn1Nk+cErr$LHrqJwnsb?Mt_B-+sEy;;(Rdbp3%mP`Byq%O5)1Bt@Hua)!03 zceT8ujFoO&0j{9f={bBCJwsQ(mo+y|k2-Xr4(fg`WZh5n+|3Wx3@?R$&Awkx!~}fiK~xL;gt$KkD-Dpa{Dv-Bjv#{x;v^TSMtgnDki+ z4?W_KEyVEra8F4>c>}`E8Ex@HHl8VLp;W%;BKmu2a4FlVpcZ#GiM*8N-NAi;e@CC& zrQzHCMppF(Z6Wllf9i)<$$M*hOC%s&kpHB+cOu;JCl8;G zydMEBawb)&$nO7cLJ|E~UwOa5#>Dx;>rTvai3|LmXzd;;vZl&$_Jz*0|C!%XPRQ1O z{VIF#MD<^sk;~Ezu$;{Z{}OE)mP-17gNhp>1RYr zYAK=)f&4R$Suc#I`=X1H9WsGGKMMCv7Jm|V1L}LJrs(P=Z<@yP!1hG7Kr#Z;qH`eX zD%y>1xL@`Rf#4e>w~+tHS{}W%Pr50tt4{XCBb&;Ac(sLr?eX)M>Nsh@_Zd~DX+Dok z?i^{t%}nb(6IVNPn18g>3D`}#-<)%tsy|&=#<5{M`ulh%{lRZABp6#mxvg*~@O!QQ z()S-öVwOgC@{8s8Ilvc@a)*f5_o&~hm{`s!gMjA_4f{F!}#Wg(kkS4R73>}S7 zz$ADildWFgIzJv!3fRzIfOXI_2MohUdn?eWiS}F`G0)(W`*tVBTx@DliuPezy%+3^ zs;4NX&i(QSg~gG~R`@;uH{$l9>?Q=C5D{1M$6Jy z{CM6P(^qD+l|aJQsA7ErSUsiAiG+~^Wd2+}nOs+uwD}k~fpQgB1wxBBd?2V}&)05s zT^{}yb&p|W9B(l4bieIblwrNea(>L0bEeqKrS{n0a8m9+qdPU0!-Em^Aqz{OE{O7vnRdt@oS-_s0vOmyR5jNwXn;x+?TcpagbGnKya&8fHTM~CE 
zC_-$iB!}-6p8EMo!Z#LuTsOK56fCwC>+&G3@iIf^TH+=D5*mOVQI1tBvc*Pfi?2lQ z$?bbwKHut}BVu$J2_Qr-iulgs=l72V6CR(N(-hs8;|_dVU!PX>?Ac)VJK8rUj{aXi zb`mmTJCBR~O1(wzRQd`lMMT0*=njNwhFAhzo#iEQU#SgTTaW5c5rZ8~Y1D~DtMmhB%s6*B9v4FA z!I+ww-sI3{s(9YBm7g1_WBXK>ezduNel9ze7OLUEkvg30&8qXy{s9q+mX(-X-lF>Y zdiT->MYlUiq|J>rs)J#NyY(&5zX89^!yW*qJ-6@V$3&7{0S!xVVFb?5SZ?-f3A~?{ zdeKJg9)wTNNeX-tW0x>B2(UJ(&zIQ;ZyJZ^7ZYC1H|Y%mlK*5%gmSRQESRl#00ILy z6=Kr0qh5w0OOO7Y)E$l=QV%L82H6N>?T{DXs&497Z={6mef2^w<5T&W)DN>E((OWjM%s5PHC_DFc#(BNb<6Ek58NWCY2y5(`5F6ZCzX%?6Fm1qHdZS% z4LUr5`R(@5U%R2wny^WHU3l9u$cF?X#!cUmlFOYxK80VUH6JqR*rb=8G}o96Ce^Nv zZh%l>NNmpTOuUHB7V~S?@_B!fN%~q))hV~x$UY+yX?6aE%V7h4lfl|*NT@~`-*1%G?ebl^2{|N9e0)mv zR!i~ZKIwVIBiBtQv+E2+72V%(P_rT^=bVZFhr=s$Zghf0kI?A>DZDpx4=Ss8&}?uC zBs+S0F03y^_>83kE@;q3w%qeL4_k6M6@F^^vcgObZ zt+re#cu@G93~QsEUn~UUBuVa8M*1B(l}&XpWjfF$3AQ|aN+oXM8%*~Ey@bV zvEI%%E+sP8>kL)KPbf9o*tptOTxhz~#;>+u{0oyNp&N@igoa1SgI_q%6T()M4{HmN z51bf&Z9h$5hb+ZNFbYGK91}I>(W;YEXov`oKc6?#l`d zSm%=n{G_6ZP{(RLU}5C_A`i*Ahik>FO_Ua@?9EhsYRV$?R|RaLNAdH{aRmd zE0rBg?7(cUuN26dUA#}|Z*L{|n9-11NWBh45dlNlg31wtxLXC@9yL38!Z%OwZJzj> zJ`=4v-K|XSZOt<1t0J0rSi&#Y*9R_@4Bp#GcZO+@-d}!bmFFxGspDua{)q8IPoiSJ z-FLixFvr7AEOktT9~8tzzJA3WQDT(4!}`MOQq8b8OP`NF>*knl2`rDwaY=SaAo5zay9qpa zMBTrWatzt>ZNnh*l8v3HlJ}TLKe7Ghi|1<DsGK71egSjDF0H`jhYHB&lr^4!dit4#kKko6T zy-k)6D=Sqv1X~pFjiv<6>771%mg4Yyec!JD)XOq8hrt}@M;c1C?(MMB(ZOQ1kCv7> z$GQKL_%pqy-anr2f`)IIOIFWwwT2z;ZU>%>^k2Dr{&pl`B42&biuoi*=K}lei#d0H zR4;e@Vj^3HWO?F8h=(UIkd-(OPe9RK`gB<8Jz0!*$=I521pFbmCJAa9o zpHEtuAGN6t*~rs#fc+-9!zv)uuN{F-kC3&eV!(ET;GspW59S#9W- z-bx08_pRz~aZk8~@3)eztv4*60xLbEQa_T_F|;9S0buQJAq(3Z9AdzBV9emezNO`% zXAHoaxCx-LEGS98f|NH%>%*ljhD!+=8^4-Z(@lxNFxk=2iv^1VL$VHV z`sdvff1uRwS6j9Go!4SBr?o{h^&mFx*DzqzA;xAKS#j4%@CjyvN4ex#S95CnR2%-A zjFQr1b+VP~*;36fn339Tj?aPy>WSj*dO_jlT$%|g5~&nC z67M_(+9$M-7ad1V1h#%^BS+vYln7Z)bL$8Kp~=d6ZFjeNQrsoy42d$~9J~o~sW0mt zAr+RGxg-|(vA5gVvuY)qmk)1*^JpsHcv>2d3w1i#@@R3Qr>#F|h%a!n!g;F0qOQle zO`Y_lys0;rf=cDpPvVGv9YPd+*55GYy|@FRMm5igTSKOgVejWqEc` z&abuH7s8ZZN|fAZ%sUQT^+VbcJU5V+;mQl34pt5rynnNV|FT1HV1r$~<-KMGe9(DW z2Cpj*Yp;m`loGtu<1BYAaE(RjtB^8l-7J$j)2e)@@)YD@$b2HRoF!x|?PuBnpl*ks zNS7UBOc^8+-}jxPI4I?0R@r!;zZxqZKM3IJ7r_NnCL{$stT&_M}B7K z`tL0(lbK=Hx6g(Gyc5MrgwO=rIZ;LiZ3}f)H~cyGVN$A$17NqjuH9P?WmB0X8;^>Q zQDXR6J+QAp8t#*Oa|*0=3dffT#_|GnJUej;*aS*|XJGVSSGYlkJ3)jIvc_WJH8yfL zfa$LY-FWd6JT8}R@aZew7`myb5 zTvYaqFGGydHxW@?h(lK;hmHfnm-u2SG;}30_>%vVj?ND1On#VQ_z=KdF4b#&GrjDK z90U6OSV)#&&~V!foLglO>(|~ER<~Ny7u^mn=809$fB(0j&*OvyCJcdz!q$*8O*Ip$a3qpYDkxYFB ztO_JR@U5$fcPybt@Ih*qhGeorO*!($2D>q$24kX6uik2ET}0sjSwL!n~tK?F8> zEa5186E1XfCB6%R!fh8I)7ToHZx-(p<`(yAeotmh2hx31SspvZ{Ug>2_MbAMSj`K^ zzgG11Agt0ot2f*^DE~%k<6vft;oYOJRd&tiMqFoZ0YHf!&C_AFS-|19Ls!1qPl1)> z-8Bh-E-9bVPsau!wKx(2Z-3QdzGnrS6ji|F^mc^ZLet|3r;W|1Gaw-$!48>f-b>ZM z>r>&Ds%g~a=lB~oak(7FT0k1jTC$h);rSnj?pS1H`_yq*V;)biH6wi%%8EiB0y^;C zac7OSc;2^$8XFjks3neT!v`L0$atjD8*6=tH0G-@)>aI*4pmbjm2HNq4OO5FRR-ra zn7)r$iZS{me(vUdQ6e-vrCI|p+H8dsYvp}pmorYc<8dA4D(=g>Ci_b9iv?1owCw?4 z1I8Y*=14Vx5IcQIuxcEB;T_=#H2h7CEd0Hz?L~wreESFtn|wgbVl?gJRtn^dg#VAf zDLHdS@6!nrf^Q$!T!M>k0$$?IWXm!lSljhX6SCHTMx7Pvm2>;aKidQNv4sBVq99K_<;; zSrbZFZ6xq`C(4vsr-Rn_2K{PU4?nUu{Y?F7{GebKctMfPzF!S3fK>HQGB8Bl$)weWh(`r*slUUEz4Tb$0ND;1W1e^NIbx;Yv;l^!W~H$QG=;bL_}UhcZ% z%7UBJD}Vc~+abGoNI%juzm1Q*iaTTFURb?ZN!bIbz1t!LX%dPx&I3$oq7)A zx14?C7Xh^ag46fj_RZOc8kp@Y*CSeemtu`qg)d*Y+Z#0BFrC&O)$ZWGITyOc4)w@> z5hOBZynS+G?savU#QMO_0$sm_u@3M9dfXU@ie&zVr{SAU zKVlqIxfBH3l|5&`_k_)0Zgz6yi%Ku(1(?eY0r)!*s*kh7JN5_>p3e(6xTk&S#y|WC z)WJkzChU7|71JKdz2W<=_*@&<(q(qE ze&B^fCp_?ke0qhF0^aRxA3fm0$re15>ve4~sZP1A+;ycax#r~ULjZ9_-^d!q_* 
z*w>Miq(3Hnr~;>Y-}DQ|v;$ov>@?qK*qj^7cviU+o|_%;%=?8|VYcf~3<$BrbG4_8RpXBvA? z#hwRmF;KMN~(Y@jVXY$6YiJ34r|Wjg2wC`^CgjiH^`@{%Pz3t_A%s4`zHKG_F)~3(?J}mx%auv zS=z!&(FlXInCM)6WpA+wYog$#sP=6WEc1B?%8Qp~;F3l5bZCtfC=9 z4)JaM>>E2qDB;K8G}V2yk6pr%bFaFeYhPU&XO-;32CE3%K40%5pBsKx2per})`t%> z2~WB!l5C`YTdv-{%uqHDvbdmTR-UD4a_D7^0Z&pu6($MnY^lr`xf}NTZI$ zc7^s#0U&s}T_WE-fv^a6zHLBw?q;|-pSLlly29?nuU`uv1v?yPB!io)5pk?(XS>}77f(#dqB z(&En1wx|lgg7sl7&gVU>s0eY6eTocHMvcO9@DEJ|a$8;d7P5r}IbBKEw7%B;8a}?b z!kY&-5)oU56ij_yTHRE}&W5~!4_E0`du9D zPx#3S*lh%nPBPJI=1AV>cMXdG4`J*ZZfTxd37MK&&+a~oF;w}&qYXJL^cKK@wPy16 z9eccVuGec_S$VuPO!1qzK*v3a|7B0p!g@kJzRHktuNPnnzLB0aHO}^y=Iu@=1SFpK zu7oBplUYR@{N%nR2KBMR>P_{AN5uBr&1msx1LDC5x1?x+~+ zCfsbh=$5Q~1tHAise%itzfmTDDqO5IuaCean+;~)=`S-JEqrdOmAO>-wr;DxKGLix zi@A6SNs|$9Zb%qDe>nMd!Q%oVnd$gU&DNpous3`nsl%Lj#F$BReUQ0FKW&sFeovFm z6S5iZF9#FFwNE~fkyvke7V<;f5OMHi6yTGTw1qY8ueuh?<UjycG(wzGkHTL#;aEIEIec|sM1yco>2F19la>~K&_H}pMI&IH?=It zqv%?^Ri5sMH|u(;s@t_}+hudyY@Vl1d-@9d>}6@wOK99{GbPL0dflt=4`)Q|(1``< zv>FOj?qcg}Q{40~Gd@qDQ=RdwB&3J`;*Y&uR|!7{Lgr#{<)+uO-T*LJn0QSqbgX_q z{AJFGGS{LJBNL*$)p)MlEwp?=l4NM}t-YJ+`YK}omXf@Ot>3C=0{Imu)!q)hezNT^ z&u%HjidpFP0as)729-M64ZCl;RiCJ?gIMK zLan)#vY{;(e>Kct14?&hjS}A`{?IA2jU`~vZ6)28nVnma&9)Ydb~QPfsIAObhOctO zdeRL$?h-uQRI>8x?H5xB-{oifMTQHZE*cH8|6WS-sN&;U=ti-~J!om7%s()NM7n`c zYQEcb1^8Q#W2m_i=diBU^6ye4J&bvh?Se(ZyvAFziD8_S1$;cRU5C?DLRy0t#f0wAaaI8PXyqD~!2aGScn-_VQkhz_sjp!WlvUt&%% zIIRU`o2lLknWm8V#8ldM@QK_K5)vxlo(4=gS{9Gg6>7K!L?TZHfIEH49xKAyp<|H6 zk%mO)j*bow1WrA;7vx%$8!~qYD7hS-*%{bkGKpjxChxCM)wIYAjXY~x-n>CYY{hj| zCo7B}Re1>Xb_0TF-qCTZ!qyy}TDK8#{TCd`xT7xrm`I8Guu0^7YX5UPz=}b45%%aO z$B0zsm6gd;d=#+g-&VQSx@Que$FDy1VvJ7C&gNhEO+lr4BimpuX~kXei-%MKhN3KDbct~wDl`4R1<4M3s@`x2T5Du zXpo!pZ_+O>F@1-3s_We=@OOU$V|&M<-Wuk;j13%DB)j&S8YZg|G=H8_B9~G!9!WrH z-u`x#Kc{G}o1SLojBkb>P0*%fC&#S-SKguDY6=#%_}pIWe&dbCIcw@nRJ@{_YkqRc zq=RVlai{lh7oc!Yz;Uve`$xJ{YS+5E*avUk3&`(_?&(<(N}Q|@{@<>I(p_lw|7L6L zNU4^ubvi>Jqn8eiM359}D6gF7&D(qW3WIO#bnk1D>khro$Ld?Gp=gipmO2r1{37?S z@X*ORaxV*gCZ2lxiGJywar(AbTUB(U*xX7Z`BGeCRoSUOHi!TIOpHGqFxP)~D*oXF z|6j1m|L2PT^&uqxKOOKgt4rfn@ZeVS&Xc{S$txFM%-wYx{NsGl)Z$(}`+UP+3r-7G zcE-{+R!k{xl>#do>`gqS&nEYA@&8^nT#)nuSrLBAf96-22BPe-mg?)u$yZVeh zHOL<6E=ln7aNG5tv-j=!E6>R4n<2yT5u$u+*IHeF?bK5mxhTW;pv!P^QHNq?-ch|t z*FP>mF2NHvD+kI<65(|5+5kpMZ6HJ3W`QiWV-Yi$_}b7$rPM*;&vGc!(>rksxOgV5 zM2f|;ZV(?~W%KOJ^8Q#p|4;P(e-gd#L@qU*c`{~Q#c;{m5-l{{NoT7h1ra^PL#`9T6v=5x~$-R6FpEGh91}TG5>*{ zaHr_+t7s}^4WR1Mk%2!pDwc5H&il-1C(`m1DT;1ZQ8ko~@WX&&|Cbt0O|9?snpi!0yzY&2*@HGU$n%UF8-F+~3`}H5HbvhG;8w#PRFN39R#UVSe+FC= z$ZALt7gfZs2MKYiMIYS5sm=HwhwXu{Ed9^?2;p~*D{=YE9q;|sRkiQPAFErR?UB7j zE>bR!&FC-f==C>mh_(~kY1Qd+3?IT5;bxyd=Xn7;fzs~F{_?jj2fnsCbGYomm3qQt zjQOXh_{smQBSX|BS3bQa{o_YDwJ|p9M%M$v<3k{g+_g!Q{bP_DSdR4^G;R)vz1pkn z)#CNpykmdWe{3gQ$_bWQrMRow+c;~rL9hxNVUEBR36XyGn!E7+p3pFlvn)yVeAH+2 zDjn_j9fLN?*M=E8{*awx)2&WlUX9iT8f3Q~;9rgPJf}1sQ5R%mXRu6G2iyn{&B(y! 
z#W6nsy{s1aOFaOiThV~iU;^{>xB0idaaQ9Px;c1a5xI+OJ<^!$rl6qUhDy}W>LqiO zFxR&13e@Poh^pc@dLGDXnAo3kQmu)58@j_o=;UBsJ!Jwsd?Qz~hqp7owpNM9`4_PZ zlm+r1h_AZX4*SK_bF8FDH#fA6(>)4!4%4$sbB_>rznuI<>azg51_=ba7dpGm1>bV4 zn4D)AF+#UCSm5y)MZkaZ@RuRNf&Ujm;OhAXf<5bPS)xPQiG6n+X^VwVD`sV1%;jYt z9hbcI+5L$kfav+aX3Kfn4+Hq1Ssa#`^LQK|@Fb`1be+hl*VXG;U>9;=+;z!5^8e!M zO5mCP|9|OAMLL1%7hAx(2rW~Ou6*{CtQ7j_&m}IsgRw(6IJ^6s5<}S@Lq6 zbZ)Np(j5J@0PE%k*ezXX8@LYfE>9n!hMc2I&|g>dw04{mHhkZeala)$(X`w@VAdX z_&|P9TsOo8Dg8*wYQG*I?mLNivZP1iucY$Q-(ea^vU5EKKa{f|UM*Nm8t5qYFgI%g z&IDEKG6N8PGRE-;l;+X=J}ZC|RQ<-E(Kjf|6r z0UO};mAFnU#8dpO<(7@bM{9uE`7PIU_JfOBn07NeyIvL)^w01sK=Z3@M0_}xE~w?* zrM>qqyt-_ouEv_c@3+-$ve&Yw=t25T@ip8vLzqZ%a()H62a943+BEmxQN|aln0Ddi zNT)lgwOHI^;?hw0y`jx%=85(=s-m3@%RO3xFIAQvt9=9#dVPkC%TJ37q~Z({&Do?~ zq|_glFH?RoEz3ucLN?CY!5JtEp6^d15#p1ssSR8Z@pHZmfH&vMPy#E~Tdi)mPj>XJ zLaPXPPIn{(+8%t+SX$I{JOB}Kf>eBm0n=b?QP3Y=?MWtwdD;2duMtLH*3&njQuDUV zaGA~n|L*ANLs}r#DB0S>%l)KSEhza^s&L>QAQGSEioo%r(W4#9MRecE(K7uJb>)oJbZf!(Z6vUk(#wUg8${nbw~Vzb_u1l--9xh_iby;KdZK~XLJ z?n2R)4JBw%B2s0RXovWg2uy&>v9O6XW?_vK0ZZUvAQs{rCbP+xl>F#&VnG29lNC?W z8KJV??pq+vtGA;?;U1Q`*PWEECGN;GbMaZ>ysLjbKolDrekLdPPhqyVXSjFav-)$( z?~`f4*wK_9OQVNPXz!!eQdDg&UnbKGs5)*@ZAc9i>R&uKNbU$(lIpUjIHrs|q{_4O zNzwP<#XmITj7YE*pKL>&bAUw|0xU`-aq-BZNtVHjwc z=ZI=FX2gbOYUdE1SQt8v zvOIB@bQW`@Gr@E-FWdmLThtCE5no88@(?*W1RjRh?n@WhZaz$HT;o->MfXJ6g2`wf z?5Piqgs?r@CVnsX5bv|7p!NM89RE?Y_g4z?r$!vX1V#-s5L`>I_-9Kc^?UIi)1OXQ$aW2I#{=DAp`fTkX$0Q!3#1Jj_L{ zcSQO!5+7|AdLI?9zZsJ>GCz9xI0 zWeVr@E{R0VdfdXn8d)15;`0`Y05bXIC+?6%NI6tnuf4a%~+L>I5$*ZyBrz@Q?Nv^v- zVYOE1#Td)msW7+ibMvo4SHAI$Nv!sB{34TkP3ROFZ`kSfS`CSd;JP#d9k6I0+PTWY ztp_*tU)0N-aWiob580B|br~>Swsz3krE+wnUqha_Gx(H1T!=lzG`z%G@o#}80}5V# z2RD_pt}~t}q4MuX(z|jstjAKmwKuNdq$}t!uBr$uEIHECOBhe39Zt&BjXPaz=dae< zY{gsIb^5mb)_aGNVTPtDJ2jZ}Uiub_YWyJPM2V>8A8?6w@JuD96L>WkoeeuO!ZyOo z9Ft(7IL?hBURntSTLfto)#*@BAKlRG>k3kExnEdyX1+U5aHOZ9z{n8%G$`2a?lKCt z1oe1JhNBDX^q1b`fD5AUGhnI;NNxAYxv{93F;}b^;^H_8Zd}5%VR!VU;XC`_cwY3Rl?J2X#BQU?(GvO zo6#o{f_lH0qgCvSLJAl5n-fnqA})iXdNo+>zG<|7baLk7mVr4{tZilPFZK|2iaEcc zNGN%leB_xjCiMHJw`YO;rdZ!)8(qx1@Y)YnRdeSXax2E~)F{7Vp{xQk?4mahw^Hsv2U~5Zj}YpHIq#B zail#(xkhg5^EW7W`d=UWX9vPQf!jTqoG-#g zb$;(iNDz_&NEdYszkEO8cTH#`&tUs$dsk8$}Tpw*I=-wi*9E$?yp8E9N)zPFUiqFQEwl@Au5SGF|3qeog>v z;5a6mWxS%)xu?cS3`~hY!14xpykLOEJ<)}ymY}`(ZlBTND_+4KnYQFL;d%3AWVJg> z0M1|8_r+2a<`p->hrkeQrVd5f{!(E?I1)N-zAVciJ`!c76I5Yp z4{hn@7YO(5dK$%RnMPW_c89Gt!zS;A`|CIYKh}xWXG>VWNK3WnHa*DA0OTj{Rj_9^ z`}(Xwx;(nmphiu$qA(p*=W>{FZ8o9nm)34;C$xuwZEH*rY4Tnjz*}%wq z+j{XB!%Um8Pjk2GZ9J8A{R2UdYsGo%Tc9C4FGkzxEv9!gg1s2Ax(^vTCNSsl3!!AY ze^%Yer;7?F3Ri8loRrSOhIqVCsUsY2)TM|O4^9MgV# zQKd_ppH6FsM9V+Fb-~tQN7uQLOKdZ_TKUP%o`dt2{SiJ&_YuBTuu4PkyMyR2c(s)| zE?f2d4H9yRzO1?+stzvFTiNa~UU=>?N=g|fPNg{!Ml~g*X!qjzdC%W`wZskM*QNKO zdj)pFy^E=TfdyYI7Q&cfyw6@t?FA77#MPNKtKb69 zuVPoUwe#V?@5u@ladg^Yo}5=-KeI(r!$@LoUEjiMnsPJ@?)^(qt+0>jgIq0fft~Oi zNg`=FFJgGn?z8!T@sf|(Zl+oo$#NUe>XSZbt7KzyErphE);**WfW38Kq)}5i(fW7P zm6V2tA75$NQ(E!)b3$c*{eWBJrkHF`4>h*W{JED}D$wC$Q7xPPc^2+&F#Wt?vshG0 zCL4#3mmGHMG}{b^aX>xttPEGUIHAWA;tem(mT+0gZao`vnv}Xns~XEE@-T%#-NKuB z?_19ru6@BSDP9kw1@`etll~jRDUlB$yRLCqGj5;#q~CIxhS_+dkU8%GccYyBSQJUm zuu}N>1Q(ZIrR>j>F2*$HG+yrX2?%DiC=$9vz2s54p>WB~SpD9CuaN81O!(hw#F(*S zmrxd6)Q^Yrf}(^+aqkn!Xis^m?6v>z4yD>1XgaXx6dRdZ5bv-(Y`#s^w zy53dn+?Nh~t{g%>X-lh<;y6~imMJQK` zI#;x6x0Hlha)?ou;F3;LeQPB1D$o$KUnb{E_a&Sv2`L*0=doudZH`A3TtI~15Xe3e zG+J-HAFGUG#6swl!$t`=z?|Q~61{qmMv&9)G}x@qCtmpXC6$#x>R30azn<$G)8uR1 ziJw{Q+0O9)e<)~|iouRY`i8)1$L_#b!;oW)DII~ut&zL1e0q&L|5&mSe+$_TMIMgth?}9V8^4^ z)VBP>jy3L>o)(QP`JVjwGIwt(dxZt_Hg_6GSPrA!eBL`z@16=?^FN=1;)0)w%#HkO 
zIV?i?gN0ltE6MFZ?l6Kq5L1rT3ENFwp201K*Mma6BzJI>H)380<+_U8ZDs zTaH2T=SQ!@xBO#FUp8#q0XOJbI|z}&Hy-$Boi(zu4)Td-Ux$*Ij$Ol&c=;L4R&6Fi9 zVHt-}suXM&)wbe9Nk|l+M6K|6H$(~K8DPbHL1=}KsOlqnp6;M;kGlEF3m=Wt0lsi1 zJlMaV3VwOKKaJ!WG3YiYEkf0664Aqz4g#0s*7K$8v8gPhgP+s+UNH#;l+zUTd zsrPoMD2<3aDH-Eg>?ZIAJ&fJmc)hO$COzPe$hTBQYmp#%r4fuhI)X_4f&59)ej|iy zezqt1-e*LYd^QhMI@&A%5Dvu$M1=+)1H3Z1-RCB20Ekuw6-|!QD+AaS0WgA@fk05q zSD(u36lh2*jTPh1TBnF!%4cD7X^@CH+HDH5M|>a*xSovliRy<0f4+{JM2GCJOH|}G;*%$cyY56vUL9=_T{BWx-q#;Lud)^ERF~-AxiR^09=>nXVlQ-nVWEq7yOP&u5?N zd7F6o>=Da@sWttetja>P79!!n%=h|OV1>1X?j9J>_zO8z zdU+8W;l}(JeoE^DZHf~nQEI11Y4o>GKxe?g#O-&)?uDx+ou=n0hfX|c(B9DIo^;#n z`J>K}u#0!!nf+pUQ1^{m&10r~$*nW>1sFDO$$^!}5NUvy$G4zE`Jl{s4CpA8BU{kg z&A@4H8-acoAiU7_QTlWGanrBL`gmYhpM9o4yD=#+%ftQZLKy!$L4{{JTdhl~Fm`=^ zu*p|;z9?UZg8hkF@E-tT^42co_Gv@x-?(u1PftB0L;Z88noXv>mzyk?=;FfPj{yHG zuUtmEn`(Dhx`22+j{=(OoHez~{S%O`RO$U`_^dB*^8JF|JjZdmAejQ>o4k>v%#aFp zqfg8L2InTr#?TYRTUjjxIhnFRgpb1G*-e0OKdJbl=$DksrN__{#Jc!H3Dov8i}v3+ znw5@_yR)ELW&Nvr6IwuF!~%YG=4HacCR;-oK>9JjZ9S(P=w_QbUl5bnVlr1!_JbnH ztbwX%UeoL`?r`GS*!$;WQf0(e-4Jq2Qi6{7K<;_w<8c#1L&Fnts(+WoG?L7eY?&_u z5gC(gjLLQAE88`um!iYlSbpD*pp``pRj`$YTMVv3Ek-C!SF!LIsJA`ez#mP&f0yk!l%azW&uxY1g#aM{Y1b-w{)^|3??_k_5=4kV^pQ%qxfc_z$$VE%c4 zc+(N}yb_c#wI2t;JL66r6r`+aN^YsCelEGncNO#y{ujmjF<^xJl$_5$2Ke8 z&G_jS!D$BgjT%92;IzXlXLYFlhGpj?u;7bNQl#S3AAJVs_Pq>UHwg@7vs$mTMA(>~!7-?79MKCn}G$*Xl$cHZ?emMjo=@jg3c-F@+x1?J_RV?A#BQ5E4{8uh#jGPGyIW{a90qLicJ0AOtz1> zVDO|~IO|Loeyv`o$TxhY2r=?9Q@?=#3K};i#t^`N+Xpf6$J%2-hdZO}_5wQ5p{d&+ zk%fb!dmE2m9&2Edn($x4d6I`-(N}VvWnDj>&sywybJbr)tY}EV!gC!{`z1Twwvblr zA*qx-hJR$EyaSg^ve zHZ$N9Vd4N+tl#6#*)^_<4qfHTn7G1L5q`)Yfo1z_#v_Ocw#v^6Z;F`y4M z&Pu4vWEJ$Puqhyf%)WDYR|^o%7_F&Ob6s=0e(@(odnMj|GI&LAbF3bZqVVp=?%AKd zir3(b9%qj}o2Y~q9;By;COp_jFUIw#NS~WI29rYw!a%%AFJ3dG-u&eRM;S@zydbNq zj$X@1{tmqDTxczX&MyIyX`5Oy<{pp7Yuz|?3~Fu{=Gu~W^cZXf*GbQsL!^mfAcA7{ z^eTBzUdRYI6DhllWE}Zbk@f8NxK30$Hnp2IC1a?6pmyI~h!-~X8))L5F*iON46kKJ zNmE_|<-`-eFLw~lCxWLxB3W_wY5JoZ<^nq_ z&(V}E?c+bbl0l=RAQtGkzCa1&0eh{eQS<2YAmM|}r3znS;}k52=su!3VyxvLZA9x( zb#!#}C_4Ltg&4Z2qVcR|O9h7pdAHt#!afGKrT zL!?5!{1smOq;#;FQIL1Cr>YtRh@M&(T^&i=0$!_~)c_7DLxJcW9-sn~@?QQQS54v= z?4s21Q4>=%q*>dIRU%{E_w?Y^ma0@VyPxZlLuN%6da_I^(quB;bo*r(n=0mp_ zuZL5Mp?{#96`Rsp4K>htoz!dX%ZKPb{pTD0ZtczpS3O)8etT>N#qH0PL!{IyUtYn* z(n)>FNd*=RtA;lSiXGj%roIt$;}NT_U>|FczG0~vWW0S*xZKQ+jKr%b0Cdn(Qc_MK zUI#Z;uL*2>;920Qy8~+4!^(1MG~SuaSMVfp!2e<;e}@vJS91E{L?aS<+D}*dbzIQ( z?9lHMJbPxC_;zMDSfoP+qR0Cite6WKVwZx8j|&LsouC!iWbn*It_I0zSAlpGbJ79mj&V{|%<7 z1vn!4jbK{XNDnU;L~o4*=>1&eUK7q;#P=O&eoyaw86IGAH`?o0igSAJC}K`)#ppUX zp8w^8$lO5twMs-U7nD<6X11L&Ggi^!%SU*Xf(mp9`2$&9Wh+Ha)9DHc*sX4ANZ&T9 z=r9-pngb9)zu*{gQ+|2;35Wr&?cft{LAFq3l;%dL1GIogco)*X%E61PGr}dGt3&R= zu>Fbo^l6I+ISNeP+9yV@L!A267uN(!>?6~OL%7FTS-$FN$o&m_vvJZX*E{3k@fQ=M zD@vneoH@ZshCtYmsK-?sgM?(oo4P*+iLj**tp)JrCkbmfGQ!FI1s$YvDyL9O<#?5%+ISy7RH4R+%g;j?=T-d^F1V4yQ|Xl(rjy?g(ps@=y*>21tm@ zhe(%b|t{WN}d{PT#&0q5h!=7k=KwQMpd*%4Yop?gRMa@2m zg|!|-dCNaY8#nFGvY~ej^w%BU%~WSKmjDW3#}Cyw2?kT9-~1_VBK;g>*MrG!N?_kkYQ)u6 z9rf(L`Dtg!Dc=4`+SaUD+xI`Ny)vv4W}~*$yV!=f!ATyM+GV$w@38h(%XyY4u2`;< zA0xg{fHb6%kkQBmQeq3VIjgD+kF!2)l-HOU4l z;XhIYVaj-%OtxkBeHaOBSp>*MfApv%#2uhmY&k(cc<#Bto~SN52U?iEFP0q0yY&~@ zwQ6Iu@l(Uw_ay}4ra?T@SMx+gpwOH+NKzL)GLA495VtAp)qCG2hchj33M~L%2E}k1<&u3-QWhOdm#=q?W|v* zr+Y^0f$O^vpcn9={m#UBji5SBFwbExkI&q1N`79Eh( z4~uN*4FjaEipWX852BxA-IhXQw-=p#imHDc*zcBAi)rvQxu?MdT9*I_0$+DcMR`Ft z?tHI}GV+JIQHdP*D*BFHRIzh=6Rk?wV5OQ75*V!ou01s=@qAm%+3%l@A$q|`OxJ|S zSo-_tBT5`2Kz{AsHHEPZx^atylp35j+P|hNiQo#Mf!V5(RZ!Z{$(7}#KSCm62K>X< z6;QlnI6BEc#1^h}fu(rN_XSa}e4VNF{4}rpF3%JSRXo#6`7K6a8yIdf98r<(WX2th 
zqf8hcoNFX5KpHM)@#qQ=_aT}TY!U{8`Qq@`>IXyFVKQO(lBCxV_p1U;&~w0NKuXCE z@jFB>_3}w$ip1C`8chYziuq^$nxQ3)E_!YAAm!@-*_^y8--0MB2zZ85Y)wB_Mc26+ zQX9Dsw$EVjEYrju7V1IT`&6hTew;@9!YM!96rZ?F-qX&_b-Zu~OjSm?TexZx3P{ zpTTQkBFzc`8S{?Tn)~S_*OTUhfXY^#el72D{I8a@_XD`P;?h@>PK1vsLgkSKGluBf z1C<8cx{E6O-=nW~^APP*1Od`roVZQaXdnx`fs{(V|6h_`7zfhx(x ze9z3SBG<8iPg|P9R z{wgyEElgRg#`o=oI<1w|1p~I3M~?!`$%a|p*s%0-?V!ux4yeJbcNqTkMFktR#yu5MtnR5Vx$YNdLm{>s?*-6|F03;i zHaOA^RDC?!^)HSQbHGkLHGlCTsOJrm!spA>#@X)05Yj8uGO8kmA$@+}tS=POeuKRr zk;MO>Cd)(PYCF0E{WyBB)i?+7#Wg)`R6lYx#MVAhAmqzis!#9+^b?<*HW(37>-{ zwkw|C(63Hs*CY-mSy?|MUdg_{8!BS0y+Crc-4#&tIed3a(XaCats~qN?*yZ6X21Du zm3D)yL6XnFr+Xstd*vh6dvR;3Zc5r04AQHyw< z?rB3WAZwRV>+YX+5vCVIKCokT4-;*x)GP=n^=eM2)-&=f2q_3`2xq*=CBpKOfqfT$ z|AOb1-@2_i%0t*$|BU{YJ7~sMR!ecxzhk7&94ui2R%~itddrvluwq%8Fu5JcHS%1eCo$pwOL|{Mk$_)|A0*8rOO05A z+{$09Q=?EN*&@&#e1aAhC>>sxWl`$(NkCCr-?tF(mNN<2kPW&c!4fN=G7X1UZlakw z*@6MgLy*QkQmPztP@SPf!S+xWW@KTR57!BE;1Z|6`4N|RW>CI8tZ@^|rcH$LJ>W(I zYO};#$k4xA(Le^8)s&SfSj^oT^G2<2>A=!Px1O`Q-x+h=Yh-TP68rC=zQ4<7h9C_o zyQy&I>5oMp7_fzN8Zw%2Z%rtO(GhM8+YNGt9Yza=ZGYYvw%2`Ia<#XW$11-u7PT|} zTDv&Cuy3L8arcjIK3a^bWd|jz-#4}Ss_@nJWknClSjT27&MgqdA-)|?M%mmP!E*h2 z*O=nYWDTkhtr33tf#>F?{ywL0``aVaM9{aIfBIRA%@`?fKUDPaLj*6`pTJAcYmofqS_xX1mO1+T2K4l_U2Ud>wNv)8XD z_vL(ntZ=Vyp;5xLp_H-S%x!%g!`+j+L8b)vqqPjRXnR%E4}NLwgBMSS3R!EUbB1T z$*+^GeOALS|C3v8<57O}s(Rla=?wbHFRa6c74ALHFR3l*CpkAT>_<_D8csH9_pHKw zL0xllyhIq<7*jNXW;Bs?COXbBa4oLes!i|~q|`5H;rZPHDDeI*nQRCD7rMSJ3G4f> z&AH4|C`C0Fca~=rkiG@|%H%i&C0{zmE|R&bI`B;MG5?OgdlqR}YGu2Wb?&Oa)oq*Ji%$yluQo(3@NDpK z6VDrBD|0VO7)NIy4)6HfC(a zWf}d_G)dWOb?=6bm@}r~-XeftYFZq-y)P1%^|QXDoY%@L+05^WnK=VSTV0+M4yar0 zv)wSJ$DV|qHJnRJ~T=x#!~+s@SzP-p7d0jV_2hl zt|ukmcDr-=(Rx|6+Fr|f<(?r5>vzId$vEO(Y`KG-viTfe`5S(+y%*Rf{f!^(b_~^O z064isUczs_Dt)}l;Hs-hNTu599-^{D7t7VL0>>Gyvf?ys;)3N*xLgmh>53t)5PDgj zto0{Z>QW<82*{JGj6ena|bQ9`$g8uVD&#Ny_1gZfEQ1(bCO(5RoPc-q%jUAs}(CyoBoCc`a z#q_)jz4da5|NnI7U{6Y0n z3BdI{OxEJ>d#9?_3aAjSd#b;1MEL;BeFt$vPAC4kK~zqy{*ED2o+z<=@IsVE{~gKp z#w|StP!7!nsGd%XwCHO<-fmk~ixyT5@0cX#EYnaLxJK*OmhzM)C!@}6!3XvbqF!^4 z+Fb$o{2KIc{DyWG3yUd;8L&G1yQWQOsZK-04=@K}3J|ZJ4RSL%?(qtbLI77!al10i z{d~9+40M%M--YMN8qc-ULGQ~ThHgm9b*lbHe?OgOYF}~NP4JH z+LcdlQeU;eR)rC$X>of@-KEosaO5hkC9*GGBtJ>fe(hN=;oNNtAL|AL9@JHgBUIIR z*)O;*G4rhL*752J%>b<+9PM#wlrb>}8R8LYV7_DfHX;eOAl4N;luF&23V9!s7Dyfo7 z@;7II_zv5Vdb*>;vkr`+7TE4Rcz)=jIh4W<<|YgRm`rTuTL9_+>~ooI-F}+Wj7O32SuoxM$0n}@cW-;(B8mntHC}B@e z2@KNh=;aA%t_M?oz5)zz?n_)B-E**cUcP!o$hYqqEHTSOK5mdN#yut-b*fV zhT-$FlY%Oc2-Aryydz?5c68bX623*gx@V)EuObH^89vc;9Zye&(||t|NfynGbzQJM z{MY_jLOYq>uyH54F8#kMhRg9}Al<{1;GHhb$=;WFPhmZhm*p4mO%2B$*d1=T$eTHH z$AQ2D;C57YZlq1g#pou>y!wbvahc~NC3H7FuN6)s0Q#7**W`X+?r7X95}s9pJZb2p zp4^$)SQ3@vlyq6)c##+0eYvQAxuHZ;`?rTm=I&NPYUXG&o&Ijyrw%~m2NqtQSicV zE`t;1Py$=~&vWsx{~{py?U=8wYqqwd?52s+$O@xWHLGXM7ae!7Ph*O!SOArQlIzK; z8SAzTlKdbz`?RO~f-{Pdq>#8%eRba*{_#?U_0k>>#RZS(!Q`<>@+lR85I|yD1;26F zR_RvGIjb&%t#G>~_5&0tVE{AX9ZB{vft?3vK{Y9D>u>N_YfO0+bJtMsBZ1!sEbzUUs(dX6Y zUt{k}s;#|X<`E|OF~r1S7{9jPcB^BgKc>4eQ(12kG>kf&l1`}xeqDig_k%RbvgyFiR3+|v>pOpQzPsyJ|1Q`xmDvv@M{` zme}Y%Z^Dn!ZVCe4(-lfznN_<(o62uBJDC^K19d7+2j ze;-QF=qP(PxOvr$1xpXA{Ns4g!Z$+N>O};K5zIaKgB@Bsn-I7^MzW3X{-T(WT*SSY z(7LhlVOb!|wl2jjp6Io-XgtxUL z8I&-~0)S3G0t7?VF{$T3w=ltuMZE=&m6C1iv z{u!*?L<0EFB;ySP>`!7-O_|UVz$CfxLA%wnYQO{S`aODrT|Ix9V%~{NJLWaHDNUb( z|DbBYH~pA2*CsISvTbf{=7u3z(_VFQ^T=*@tp{eO3%i?$pD<(#E0;&;2q@W5ol2g72Z&jzs{sh7d zY-nukW>tAS1+D7|P)It4v}jlb?6XM>%Hi|lSfqOeJ>gjY<*EEZJfqw+uful3tz4J2 z$Gqx(Wwx(_qoB(7EeNP$n@K<}arqEQ5=gh{G3vLCnt>YXFpBh|>T1k9h;v6+y^|@g z$j6kINH-dg7Nci)Ld%FgdxZx_eu3If-z!j%fp*#-D0BpvSC;w*#6c6y}7 
z1<;U^4q%lmMs*2Q%jI-j@2>KxH}{1eR(w$4CnG(;ko+Yx{tXpfjbkE!3`YP7$Zb2F zlyL*A6T2(%A=6Yn%@sG5n)_4*l{vN5H}TCo!bGvG<^a1#X^~{G4A( z11sw03JPkZ7Gwi#OrW*P?)koV(KaXfCg2J#f!`d#{b9@ob0K%S1gv zRkHyI!29>`51$)c*2L@GKbjY@jYmY;G=8y7D zz`w@MVh7|LDh~z1O4MU)FnLuc^k02~bj^agfgtZXv=Kw(QM;xIw>BBFwT-t9;kQb$ za%wG_mfC5HH(WesVpyTRY91^!L`Q5*6C4?_k#a-OJLb(24)K95F0&^zT@=?q+V!W% zXu~@@OlfB}&dN&Hs<;dLl1lsZ)3>4V`sC#et0^+PhNj?gpy?i!U? zmbVHq!$RK8=(3vBRojF9kKb8w4z6`6H(F#ew***Ws<DEtE6?fL;O}4AS+}~UrLX^FS z!4Z*`Zb&Tv%8@>RM0jF-B(MQ#_eI@$>Jie{VzO6Fh!@h;hMM%ZRG>8&F+am@$E&4R z#eSUKureniBB$!`OOG9rHkuYayu;F*5G;mo$_QJB_^>TMo-$yDEc;W|_9HDzkiysM zfKT!=d2N9F@kGwl!;$R=RCph5q8$j?=sIuLy#hVxOxIK z^zudU2hcL0SEY*sQpr5TM$2tJ8bsZ^=j)V-_O?p5thG{$J6Y~`DN-!LYW1Ol>q*Vv zRt$Sal4E+j=`1WbvTN~UW%tRZYuk#sY}ZW=&{z}cM&t2-l~Qj?)3t!;;Lbrrs~oXE zR$2BvB`n0|K;hg8il(QfJvn|mzR-FG*~34Qt23+Tq!0Zo^lxp*-m=<#autIpJ`Jj$ z-+#Nw+qCPmvxFOWq1Gs55r&Q~pNi~3?Gyqgm3-Ttcf z7c9j1oN!%ufdZSsNAa)ocNA;JIGtYf`-6b={o`J`?keW`&;ula`h?VOib;cQwZfX_ zn(8&VM^!w+(7Wq7%SV`H10?>1$i`G{mp?IN8{59on(|+Wp7#z6gDEt%Ip*k?z^j!ypL40C8~DO>Tcsbp+9i zQ9%1pv>$i}@wMj1J9Hmx12{_JZSRy7?H5n`cW*YBlhb>XSv#Z|Nuu=Jq!$&H#$Kvn zCXwWFqkO9So-Wd!O}8aFa{6?%IVythXV*pgN_)1gnhy3(E`7Y^ajyx!GZL@4DksOL zzQW{qe&kCOED+ODDJtWfiOp8qSpTNS6fKFl3|q)4JsoNOviiBu1y=kfYeN57Yn4=~ zcI=pHxNAv>nMc=tV0oxy_OjDNYooP{GAj!6w^OkEbeEivq*qB2LZAI$;dcC3lm~ZAN|HXmE)9G1*rodS*8I}VyX8DPX3zMB%078ts#DZ?y3}3esd)7M+ViU7*N_? z201GoetKEpkODT&ZFm10p$E8RlcSrYs*D|GJ3(0j_ksflJ~+1&~N!4xI|aqhg8C^K&V#o5@O zfY+BIrA}GYPf`(*QE88#(8lwU>xf|?mo)ssf^|J!-0@zf;RGziUdm&Uhk1Q1XO$PBfx%_Dz?#9xoysT)2C;~ zg%m5>ya7@^{m{LSCG70TeY)og83p~?$2v!8OrB}tF&OWCPxF{d&?qKuB_T0YZYu2kkny-}_L@tHG3%0BrZF`U@mSAJ{%Jf>c8Ht#DCq<+!aFS> z6(f;UhXgoxZAE%MH+w*3$o<3JL#|cRq2DZTF(zbD0B$s2CIA?i@G^-VKos$xjTyPc za-o%7%@2EADu`TE9~6&qGa1;Hf~^x8g&%pNjO@OUSR zD~wE{skIm=AnSI0`wW&H znv{&*KCB|;#fXC`g@qqfw7-(a!ZLyn@RfxJJ-gS$E$uO5dsxi9FmDxBd-d@Ndl%N% zA5|>97B#6YVRxM!6FmaAr1xw0lu=Bc&X+Op2U7dqmWroP%B7X{afI`52`7=6#c6k~ zRnQnvVF!c_uXIV^ve10@1B~`khBVouWjQJJ34w2VDS9u2>9Mm6 zobt1}ip9^^&crfe)GINpF2`csm&-jriDBoYTsHkwzG=g)-5V1A+x*|W1lbeWTe9*x z+n=giA7lmWJlIspiXMAbJ@KVA_RU7y2!*!x{-Gxap6Y4g$-cfLEng=%>!dYAN|x3X ztq2?w2{50tMn0zIlDCTJCWl2%vO$)OnNPp&WjA*ON$}Bnn9;rC6p3{qErkY_rB$=X(0dCgYkcV;>msd*s&#yB5vBA*)N@Til?UxTCCk%xfg$e z^~W%7_u}VUh>XO?0iyiR+FDMzSRFm->G!x6X%N)nm?0@R?TGs2Nb)cLB(Z{{(6`us zb?;m9>Jv-mTX9|n0s{_&2(C>18FgK|b?w^ye)gj>)knJrYUCD#d+kr{RY#2-l;fDk zHr-rcGbWa_G%nD|T{mL@Bb?|6>reZ0yrg5^hHMb|{!!vnbytN+_`cdq?SOBl9~8`U zI^|Raq2I^KD$riE*)md`tAC$osrC$Z!VlREkCt9LCmB#=_kF#Cqia%c(1rlW8D0`^sNDhE~=cTyn*t9whEl_%&UeBn*1^vX2y6e*?#BF+xHELQu)8@Rf>;3 zB(K+jT&3L1hDbMCAgEm+c(j>XyR2 zwoW?v?xlxcbJxd-4b+5)@iXjdhg4Dq)pH}sS=H#K&-KG&QO6BjR-lZ5>bE4RI7nc4`bI$H^2g8XCB?V~1d`Vh>+2 zO<`eynRsPW+OC-qsF-g8_GjX}P_p``d78SEumgJZE4AqAGnX_*b*@zTZ#p{KxJY+< zSGZpRo?tmV^MuoKbhWi`qZLtzym5ury*|kH5sro4ZltH7O={$4Q&F)_m4pV7|a| zY}yj6i+pr-?Bkj^nIB_f|6SNWb9#+yihr$ zRL-3xYp<>ybdR*Hu;LVp*+;I`TofEhz7)ujB>9T}m!KlFlSvWYnPTJo#Tzu02#UO< z@NYPk+4URjXIU$6?I@|)C3r_jO~0^9pv_AcF5X2Yk*#(veRcXj6gM;886`ZxeD0%b zbhUV9STt9#|C^wwqV;rc?#`mTe_!vCI=#0oR+Ox(Va~SkVbjk^4DD3PrH8EgRiB0t z@gbgN9WMl=OFXhubESfP|7U-SnzV`sc57$Ll$O~lz0s2xxHF=uDsbgm9(?!KwB;i; zgL2{_d9`Nxe-r^@(x6Qnru}PFTL;_pZ59e#6(4OK+x*;XiK#AF%G`*3-A|;K+MeCs z{88@K>e`PWTQU6|r|%&O@u|}D#P!#OE7e_Dt5-gfu^&#*S=L2ArG?shT*({0+ml+- z#q1YP8``%rQWxGZ^_3ST!Q~{j=@{xoCtlQw6aA`;km~z9B->@q)=lDKDuVjYmeWsB znFC8Wpk8-0rEE~-)ySCDF*v3z#QbWuQt&HU(eX8!-A{NGXwcdl6q?!natHMKZRY&9 zZ7p_Dy|#R=;fbu%RX2R?;H#Mx62GKQ8zo)|CTaF{J-vOtxttvQu ziczZw?MF!m;Udkdx)Y1=%Dov9(kzTGU7j1N9VREs|r-| zT8NKHzOvRFAKt58kRMS_{huKXYf|_hO4upU`kI$09SQ6Y2{v!&IhmA<4{uE`FUWiT 
zyAK9sz1lM&ZN{jq&?&+PeF@iJC#ub5b>hk8RVo4VD)Vs_Kg^2hrf-+Mj3g`gg{*e^ z`C9C7@6gP4m7&J^!WAWYXzMJa#^w%s#^(Dh{~h?^PAUmmz(=zjZ|&W#^8A3!mm&3i z%Tur5_6u+4*J;3nW}a$mPdnmbzbbjT2E7h~s!)L99l2 z`0MikO~ynSl1 zxv9CRRAl}xfh(y?ZX(+xU~dUr?6@PH|2)TB8u{LZt6tm%f>6sgRy8~J0Gb}S#L7p^ zre(?T6J)0>bU_bFRdDe?Rl#zO&sv9t0uf#CeuUTbXY_(J@gn_dMK9i*`WKPpUE{a+ zknbDFf8Sg^st>Mb0a{J-~Mwo%euBNP;*Q5r^zpa`g>0+J#(N7<;vxsooK5z!c1eb(At&>&qT?unbIKN6jV^eqR~BGf40 z7D-=#l^UsCl8N&&hjae#4UUX=dawEcZg_Yoh8TR19*NsKWchgRfcN4r(z+m6uI_<$ z4Nwt_vm{iL96hW9!*oT0(_d{RsH`wD*Os%gyRd4UxsLWJZ+BhguqvBbUujDuwci)L zRbwJ;+yBgWmF0V=`p51+21GxSaT}4j%RaPj)-09jXp8{$#6$qdv8BVR(&%+ZsLmB$ zD4p`~($u?M7kiHXN}QpCF01NhaO=zwM`nzI(fL~ydLf^=>zLUypFa@+sIx9U90M2Q zHzY1iH)L)Vy}4XV-nmFua!dJ@i4@jm?*r)p`90{~#jDIK+PP{oq~;%Qp2Mm=S>w80 zS4j0B}aP35i>Avt~hd7s#{}y*u?0o0`{3Ky?}#?eUhDbL2i71a>v zZR;>2y_`<6#m(mg8x3vRm<{S%;0R6GShZQ&o+c#_>i2{44G7OK~&?>unm z@&zjusea=5+GAADY$OELg4=ab55zNSb}hmATyj_!um0x@jfINahN;a~)=AX}+CasNT4n-GEarexK9 zBxV9kafzS_O1XcDqf=@Si28!#c3p^%`)mO25j_4akNv^aD>@d-D08y7e3B)z4yx`> z*&|BO!5t@06J)T^baRq5tj)RHmM)b4_`fw zei5{=?9@Ex>WXokvnyfTeRLM*;IC> zPfNqy_6`RP+oiij0B4#&4RoJ?s1-aE#o~xIEE^KealLG|% zAKntgD*{R@^mitE_6&H5K4tQq)!PN@YGuQ)0&{~K(8nBft5#t-zN$>(yMql6@ub`W^EANp2>CR;Si`$&l&Q9Q34$xANz$4v()41!h*kcQL} z(+;fl)WS~7!r<|6@+yY79i^dpyqvZRuSz)Qb@EO|ABfobyLnv~4iN!f_<-tl$_`Vi z56kV8WEJy#UaTODl&dzxTd+IEUjc)_EINvpUA<&|9Q3jy#0N+6k2;trGDPB4M0SL! z?kSoj_k3vbNX|jWGAvy^%%!(nSMR}z%v?FPjJtn$!`1zBUyn?A(Z6;Yj!97Mw#Vrc zc^e?-Xxb){V?Ygg-3(cZjyFo$t-z-GkwYSiCKDwYZC$dwI45;7Q z=oBqr`RC^4l+FlE$PI%vo-m36B2-5J{PGKIT!WKw{7-o5Ox`!|7%u!`WdAdPC(`G)~^`> zmBwoC<+~y*tNwp)#*I%r!y8dRkGukrL%P|c(z^Q&#>>>NX?dkSC`k?ZX~*Q2$-b8r zqOB|iI4)2UtvxXwY;en#=R0|gcMoLn=k+gD)Ph9kOkje@_7PxsIJ=38EyBxIS%VN0f%YS5r+)0lO zmDdxG*!Kh~R0zP{*FD@0{A>@7;gSCXr&%2=FSuB&fwAwJQ7Qbsh(&K)`Os)yP`Mxw}EBXX)^dJ##D-dYw?(^+RA^}*%-VFG;QlHg_W5D`@N}zk( zRTD~PYgKL-0T)Ph2cH64f(byIa%tzi5uP#e;7v+AEMfUC-F#j5BFk(@;UX3Ch{bA4 zedyQQ*9JL_=E`gi@=a;>=s4K|ZzdR2JKDFY>)sss@^%2O58DiLed_;RFqdGEarBn!BI@MR?-4FB&yFHw zYa93Q7_Y_q&@m6g{{6c3pd^)$;6s)_lI>mu`S54rg$eQlWjYq*;~|JJ+!MtaX~fps z0A*G$|43&9%+wZws8~2>IA7bMyFtZOAb^5OPT_w$f{>G;kgs1^Enhj8)UlXYcr*W= zgz@vGA#+da-~Hl0fjJ*S!1%3P5aS<@y>1=# zC3sv$WvU4~QH0w$f4!m{m#cQ%h7pqX!MQ;+awprk_68~u1==>fz()6+`l{i$%{b2a#a9{M zo4WfG#FG<6UeSeFANC&Pp6j^!%*V)zeMps_3CR;O6I1d#ieQHa#CMdK98xdf*YOe; zXdQ|@rl9Cdw}t1W#ll~5Cdk7*3^`b`%Qku|v~eVEUu}@c?2v!)OuEehW{z975`5*2 zTMr3Maa^A7JR_ek>T8edZ5HiA?(|WPpS{^6zxMLL#ev09u2pTuEgl@p|Jg+&-aPnl zx2)6UWjN=v`#(erfid8JAkW?nQ}EF!k~$Xjbz-Z-B5_CQPRaJQhHn}nf=zsA$XG>v zm;ja(!&{s^s5KW(;%OV35s|2k--xcOXZBRO%DnCNfcfrUIjKwlAR_qNnE?>GaJD?h z#jX#5R(kQV+qAmV@3nF_v-rqzW6|vb-FGsTm1u5H!n%z&vg?3hbi4Z(ug;8>37yON zcKX1gYhAr&q9i$QJ{xIq*k3lz<9OqmsEpv5K}SUtb8+&VTw{iaFY4-rFgGUu~G)HbUy@a zXP;%e4v4COx3kT~pV>5uegx0y3;wxYHR12km=f@)F;Q@DINaNyozk~Zi8ZTarm3R( zZDe7xDYMMPh<&b0g0tSsL(bb*&KX4+Uz#(jNasNdiBty>KuV;eXTudSMdIr4OX~bn z*6J-9Hn0RNJy{mP`Tmk;uC>DxfnUVEi?5U{Hzdhx-zr&B|HBoEMRib}`ry2WR%?#M z4YKQB$;^jGd9rfZMua3H;M01=kOfm}7c*)?qz&2LAod z;RCjwdeSy2A_H0oM-J$@7h5%H% zd5@e!*ar{BkebEsNO-EVYzqu!Rmnu#{pZgscz!xHm%&X$ru%VF_bh7#iQ~(|KlCh# zTe%iGv=*)#G>+XE-S3IxexfTLFD({V==e0jJx{@e`N^F^o#`2fz|`1#d#fLX{8Epl z3D5qbktlOZIOkofK1Nmj0?NtZFTG-OAJY}xZhy;p=Uk&P1NKc0x0JqxCwXX;G+`Jj z%c;beA-8-nwaQ)SA&2vKofehzrxVZVA#<@Hg?X^|)Pb~zz{&iXZ^jis=qB{f`tFwa!nrOTsW<&7KfZ0-`u^jARcT*8yZCESBG`g7=t)63C2w!wI6-0cp?7`&<(qh#qiFTefJ!cqhwS zfV|)`l)P5EN~*`VSz8dd06I*_9ozL$t7pm*W&N)L;Ib9{hYp4Z#zub~hW4(|9t531 zTzt&yeqsWILnplk;;}u^Ok2^?pE8-Q*=C8!$r%#4HQed0%dw?ZLqO9wPxMYL1ZGtBsOtM zPCldQu`5W*lPkz%vtHkAplJn@?R5S)&oxoYWMVDB1VD?{PI$6X&CGS_;V~car5>Xw 
z_t`DpVhP51Pn;pO|3j|xU-+`6q?!el;nEo-^3(&R%)ez_OP^f@VRphXKSf_517t}} z%dQ~dV%_=sakB(U3POw0xfA$QysO&EfzyZRmZKK6K;ELW;WbYze8XZmd*b#={qkce zW3;C=v~o6oyzliJM+x3(^ZP!LkNj(?^b|U($4-((;Oo1%XC|NuMhoUKZo<`EKrq^M zr1eY*>~L^5&`E}_5ZTzUb z3gzAlp5^sWzebZiW9jpM+cP)|$UL$O&oYQPa#nw+Qf8A6k3q8}d9vuhi(!#1UWhD1 zuJixb|B0i`8=SsQs!mws-ud2=kDXTp??{GA%H=z@{CaPLUHb6XlENtJibG^$o*pqU zfvp|{M4i5=anX>y0yon6>-`L6eR^5`PoprXFMYD57B=g8>FA|~n@Rq=zpo_k(V{jX?slEe1rzhv%-cYYn`|VEugzkmKlI;c4(vo(Gea6D1MX1 zLzlf-XKuT_?eiX|e@4eKVEFg{YbV%b4(`E9r>rkhpuSNeTNd5I`qX(!kHoANEm8BQ zq%Icr#f0QD#bsi?OJ)2+K??5l~P ztg6zzaHsIz&i&QTB>#I1ZRY35(wQR<{I4+Q+X{f3qI-)S=Mh{T+-4}t*1JkX_Rw^k zc~p%xXV5?C(R`LzHV|9R0d>CCUE)y_kgyvWK=m2tfP3U&1Q(|d#RB*5`UToZeZ9=8 z7LEJgbO!qPFa8g;4ZLrF_-#GSbR~%^1m$p3bjaD~hF)EH(RRoA`;?i?1x{H}RK(}} zi5usy>SZR8S22XUff&%8I+WRyLP+DZ-h3!jHN|z`6C3(pBr4WUrl{xA%f6 zM5KX!;UTRZ8U@iK&f#)osojZldHf_Qx-LserJ}2OjGssPLcqQJ)W0Uke)i%-b-52$ z21Q<^1^8>yC$^7h!$(uu2p_X3-MZJVPSsY=VsCn;Y#1G$Yp5SMK9>*=j*+xlCNz`Zg|!FHi&3ZX)nurF{YvFYjvPpCFoi?v zy+;SVGoj?|cnz1uSRjXL;H<+76h)RqM%sS8vs9tJ*AOHRX&1;i2zFh7m0I_p%khN4~k z+p1J^KpT5qcR!qDlhe8iW!2DK;vv#^quwYGpgd?0pr~|tK;P{fX0;E$Wz=K@bgll+ z2(F(JlMHq2{CJ7%s=$Sf;Kuq;$uB|h^)HvQ=^VPX{A_fh0J|+Zq34#Jhd(fAvRJ-t zAB_A^Rq%kCyG#%cf3Rjn*cS961+pqP zd6DUNCO^E+*4rcsYVwXQ4EKwlC%%mP`Yl?~3T692bY7#DPFiflXB({3d8lhijE;o> z)13js=#9o%!`)Bn4)eAs#JR}s15SP)E;5pj{VIB)!)e2{=Z(Z!X_X_)ai(vn#Z~<8uVMBQuzhd`GpjsCA&i) z&puphe4Z$;V*H))yw#%)nyi`e4fIW?a=|IG~s@iBEB=PP5ZAMYCi_*oz|-8DtCf#vpn3X zeuu^D5U%AhF8~X*D-+&XY~CZJbc2iZ!DvuoIgsA80QRS-+;7DxsemzJzCkUk5vTXVv}6)XKP%m^;%-svRMty+r>fqF zUJ3ha-FbHkz1sEMA^iPGeK&Gyt;lIGI42CoNHu4I1TGm1O0frd$D>-3aWkP7J(x8$ z!bpJy(a_Q7f+)OZ$DzVDeQ{8I*kC=md%DT@7L} zy)-1zDQa78B%@>T%Z>0>AJl3|B;qGcI27|MA9y+|RCCExwUs}oG48n$e7Qzr>~nrp zabVc{Tz>bl&mSHPmA6+ZLlj<5{XLMjVMa3hugVv%66ul=Y1Q>ft9>oK|Jg;dN3PkZ z@spr1Yk5N@X8w0>XPi^bTS9(W_@?O)7vXTPaWQ@O$}hwNdW*SYetY1--{3m@;Z}76 z7MH3!cW*?TJ40KYEHYD%8mJd4WgZk++ZFEYlCO}O=x*fhf=MZyq9QzRN1D}N82d#{ z`1I0RnDC3KT8G=s(yU2kJfXE=LQ>A+zyX9!KFp?*^YmdThX##dud$h~x5mzG&b%Y4 zuUcg4W0BTOxPI=teB3Yhi?$=5lz@)#7L0xDT97_g$TupVQ4ln=s$~)PtlT2oJb&=c z1J==m`CTnX#W#zgP@M(gPj5t@lIa!KI24(Hg+RGI)W*I*P>wIha8%;_ufZHoKw9bZIhwQ2=VSfJdSGOz zM9hGzw_t?Jg;WJe)Pvo0t{s02Nc=))SU9g0qQnQK+izO>a-&#b@BpY7}Ej(2ET`U3d`d@FX4rg_!NW z)Adh^+w4iF-CPDD)8L7G;ACMdh|M&x6~rRf^Xwo0)Ruk$lGy-R7_rIeoc)$MuS#yd z_eN8-9%|~HyF6V4hu&3TmLm{nB8s7^Q&)tNBCGNQzN9(p4EOTzt^yTlu#S)0n=WP! 
z%~d3w%3Y~xI6=ub4#k^sxBM`cSct%ZTm?)#D@@Zz6&>+E47O5VI@5%CQbLkmMd4Qs z+x%4?;@w`^rPByv#)!#c5z+wWCx4+@)?(C96x1U+zmTH{o!ZeXMF4!i_h;UPT7NeN z0X_Z9kulu>v8FH%S}on(&R%~ zr-X63XWhE6kl9w0H*J;sN;ZI+3Tvr>B*lT?#XiDKMT;#mLgN|9mW67*Pl+Lp9{uj= z4THT|jGay!OSW9_Loj6uqf8k3?XE;ouzSkG`K)i|T2<(k^8%ugph73%xv|6Wc_M+J zF|V7I$9^*>#@_{z^E%e9kx0T>z`}V(czwd3B2=cl_&eVKt?L<>RhM z)qEPco!#OLY+hv5vBS08RJosyn31vZiRZF1yWs%)tQe=~=;Q86M}OH-;1P_@xhRhM zYqbc+VOpq0+2>@x;Mg0*y{?DRxZY)C%z$bqvDN5n+^=$E1pC_FNQacgdX6E>M*Qq@ zgO?6oy>N}<^$tpd|47cssfniW;@181xO$HT6y%BSV|i7qu2MP|Oi1Sebx%?elMDl1`%uQC z-t}vBNoj)fUGCM-bVLA!q(ldCDoKMXWD@Gs{vzpD+@!AE<|Jch23F&tvyIFXGe=%{ zS_MjaV;{i#CslAsUqMah3cS9%#UF64s*Ie;F*vQu5Kc2dvApzCnjp+M%k_kW;Ql+OLr*%XdqnFqiD^p)!ZfZCFY zpm86DqHHHA@5moqd6iVXyan3m$zfOp>V1B3k}UN@m0l^%HEWKO;9 zL-~vIV+azbK&gwD9=Q?fGph5FApBg!sT=f!FR|6z$9awOIq5y5aDcvPzg>UBht4m+ z)(Hd)Vs}aWd~|5^i`T#Oc8)J}C9bHl{-cVnm~k3rc`CWj`=an?jHVvKad~7k%22;u zS(Jt)Kt#R^uk)BFn|xt31NUdcI$XC1f?oK71OFVx*GhGsuH2KA2tMVYZ?*>u>)b6C z?b!bmEk#P?PtPqqD0ILL&>36BL4bO+F*B=*-^PVOkkiUUse2Esj_$dQ7f0JCKS(qf z^3NPzo?Cxh_m$kYNcd-$_Z63wXo-r!=KlmQkX#N(Ue`>DquD%~3H58AsgUyw=Aua+ zM7q|Ic(Hd;E|Cb)G&W~fU9Ko({?CP%gwEi6DTBEYP=K2 z$;XcQBrO(lT7zj_0wxml5f`vskK$+nS&wS-IQOOLAoqVIYydZ~w(upssdhnHaoqXf z`e2gEIQ^+mszMf*0N34@8o9D5L&F+$n&br^$MgPPKlTTL^EdfBeQWZD=krH=E{jwx z_W*87E0_{?q$r!RI75PKECI!w(Yfy^qTV%IR1OZ&h}3iy?f;rY8u^0J=|peXW$84k z2gimdZhaX&;)B)Zg`tfTT^3}r2g^J4YPRTEq>cgq?9u=)+wbR%%`rq?7$IJg{Fd)B zQJ{PKe{ekFIiNC(8iS#Ul|OFD!bn|$Nv}#Ep~e+V5vp+ze=NuX+Cf$R>svcgdcW#% zUktI{nHGWh3@qZu9G6BY)}mTw8CnvQ`RZBoiUUL0hy0(avL7tt#g=T{*>jefy={>ZUr}?&mRCd$qgcv+mFOG4Y}}CQzw20Q}-crAm$nF zunfnm;(?0_+n0bG6`?-*3bdX}N!t3@UorDX>yAH9ymQdy{YR80uR;I(bfJH{FGvZE zI%4@PKz$>FjQ{RZy4Y5uJU%QAEvg-tW}UU7a7nqXl}4Mob&Iee3uc@LFDl^hx2PKT zqJkN;Ka2_HrWG|nhZ}~+mmy=d)nOA`7tx*^L3h6UYbV!O8T5r!1q{@Ft3OXWMLah* zxGN)`CIJN)vc&FP?8n_(gl{S}r(@|2UyIs*+Ntc3wiAM@CL05ewlGF_x2SO}0M$dmm-%l*yP)i-P;S~(`;9gaSp5fdQ)`b6_@9FU_LORr(CTyE&x zv5+NzwPYTXx7JV6gh;yo)Ec*<*zP8J7(>J_3XCM>aHQ?XqAkk;k2g@p8Y!Kzb z=)%o=SfLEgwY``kaN{TBR}joQHkf&LA=xjW|lbH0RkTW%*d zG{bwG8#_k0&<6KDgG9(O_tQq`u_t=}DIv}i75ZrE$Om0U=a5ADyPMd@-LUrt2O#l@ zz*;S0`QG^yf{e#!*WNpa)|^djCIk`ou;XP%m zj%RKCT443ezLlBoVW$K@;}pOUS6V-&A1Y!4NG`zH#U@{`soRm69N_bL(?@{iuYU`QLXTYg_{~p~M*ZVQ>vPwBQM!+_ zqRdXI7aq#m81w_;O7NK%{h8Mjn#pRFNVrNIHuucSgr*@V->QHR(F(6Mr?S?ld6^+d zXc@R7^%z!bsq?~DW;n-HNK9Wf z`O$)6dS^k4?>5Vy*q!oO%0=}lFL!zNgfDzW`>ipdv%3_zuL$?Dm-2Nn~1&SkKZY3o*2F9w7H#&p2^!)ft9B=VOp|e2UC2AG3+H(&z9zPTJ4{V zt@aXik2WJ;ak~cnd8%;g_Kq*FwFJ0;^nO3}TRJ9Wr#?t{op{u^x?Rdc*}WqGSzb@N zGMC{&o-AP% zST*Aa_KyX&oqn~CsB$A5qAljn^eqmVzgO9*!IDdfPl!%QrEX z!`JNz5x?v_wpE7;YW4WOU#Z4wJTZ4uZT2F~=Epq6My(WKO`xrbfRVmpj`rZ zUZ4ZC3CVrLk3oM%BwYFBt?6NPgkR$2cBQ6Efo`_bYS>x+6$Oq>@`yf{2>G6#JtoYR z)I3itIc55-vJO3Xgm^2_VPomAf5!CNn}tjhNbIZUBmu?u=Go>w&Pt#_@~!ZZyfRcTSt=vuyMa5(#b_E&+?5-D|e0 z80xpJKxc=l!8MCou}m97%vbvPwTeG5j{I$}2M*-BEa}ZVhI8qpsTp8JPw(ACSn}- zxWo3bH-3}nY03H&9L(PZJ$Wil(#?mjK1aa9pjWXTVt(Ri65YCVO=f)Nq_jbXCI;8Y663E}y5I^ZctDAF zaf7t-=?|ISi&O?I;N8BJ>DUg>@ll@tunS;pgNXP~fmjyz&KC@W-dYR7O~%+yDsppC zTDrFg>oj+!I!11BDhOJWJ(2VK*_C_97>I`%b?QA1`CCBqULe^=m(Df5d1b-maden9 z%dz(Kj!kSV$P6GwKPEx@zsM{1n;IP#uD(}nii?Kul+u14e1Zf7`qI@#%Ot!c)?@OF zi1v)Z?-wnu+yszvJdONBwTND}hYFNhVn=VeacjsK$6vWWtN4+wnbdk=zswYm#sQjl z!XQ>|~09vj(%ZM4Brxu;aHeOX!U=94psiv7Syg(wuegBBaM9K2Ydb${)No)I{ z-BYu0k>d51(w&YYIhUv4Nc3}S{K`!pxOrR?l;(EUi1{8&_7;ilRSPM+2=*k6IXcGR zFE-lKo>3=>39jDsusY#$vytXFwncl0{APoQ+?p4ddRNt@^G`V1+ieQ)}dg??f{0+B!r}YMFa~AD30)ds4M( zg?o0pNqKD4GmGo;)Wojlubofe)7j$Szjft_}{(@`CTAO0~pMM`X~DR3P4xwET4ApEa`Diip5&#dHx(|zO^)viKi4&Ye~ z98y;VB%~M2r&G|^-K~;Z+*kj@CheYrDrcbpab8Y|Ee_}@tiizIn&a;N=fhjN03)QH 
z5ltKz5C}s=P;S!nQ%d)mWZ$o?=JCcc9SS!;&#Idrn$u?-n5mo|uEB~ejhb>M9BQuS z*ojGeuzieIV+;bS#G~&h%q6L?9kbbKS5MIoF&!_qTl6_cc!W=E`A=v3A!6)j!(;JA zvQ$k!pF?;{lNZ)(D$FL0t+wNFp4dth4TX}}odsEJs~{C}@ClHb0-Gf^0{v)Pkp}B5 zL%(9)2bV1_08%@o+lw#$`p2ZRr-Ysc2vE7we}@R*$4UFwO-A13Uo#MO?-4fFwCFW= zlf`~Jb}%FHD)#O6HW_yM;JvoFT`{7?SGkwS>#{`ssag`p<*akiaD~MDQ*9KWZsU*m zJ_Hgz=3T(Q+8K|e)>~JEg`0Hg+B`KrDUZHYf7m>jJt3;TMsSHzdnN0@e=u?a0228YmWe<@3!f7-R zb01VKqoYr-n@5FiwkIqQDN;Kso)s%EXx6uc299 zO`zhZA3fedRZ*e*rB&Qyxk(sP|1J0WQ&`l6r9kv9Aywp7gQaAu~H?~_T55B$ui}~jxmyO?+7AoC{MzBTd?rnaT zzaV+VU)+Lng_r;5?pe69BgYPVnm;A=PLaWjdR8$P|JX z0_$X70jrofyuH0Q4t4$~?NZrTeX&oYDt;D-$Y~Uxik38A_`;7~;fwARrG>SV+yn}n zYU(L}t4<5P)!zGICn?T32X9Zxlny8fi^3>PIAkfO(n-d+*}K?tpOwX;uRUgW|OM|%oMEM`7Ucvw~a%5<{@=-KLZ9V#D@sLi<1 zGPY*7P4b!AZ?6Q{&T;&zAlNp1tZccXHWJyVLMD}r;8c6l$jM{!>V0b2En!GCmv#9A z(&re4l2`j_(jjQWqM)}_n0HvRg$_+0ulq75h1X>3GUZX#m$lfuO`tD^tQU-_cwJU$ zJ$ARRm>&ny10v|A^=3aYi|#F}hbP4$xf8>TKlQs=R#zJqtz_RUcXJC{ zdFX&kQsTeNj4n}nx4fA+Nlc8fI1J=AG4~)*J|5}* z3G5hctwn6d-Pt-((VoJ){R!7wyllqaaIo8{2>_|vC2hHZVKg`o9wL94bu=&~%DeEg zZG!0-EwrGTt+uGXJ>1S^s|7@VAXh~bDr95OgZft3?Df1`K=GT=`vED4^ExC3Ml06@ ziv=o1J{ZtcN*vp5!ybY9u{aso@#5;%z}2~pG)2k@(xm0hBNvLAorgfespCYZnnHf! z<~NwEjI`3U&fMGm1ov1EIVjuO%(0LLmh}s0u6}hF#snVH$0xdVPaVd%6JBpWIGJac zmUDZ^u$!0F{R0N`3@ECj=f*P-x+<=~dO3z)>pYNeCAhi8#}0MqjK01Nz0Tch&kw1x zsHhPC!jmrgGfNk=^nHKaB5K2zEZgNkCd>Y~l`jF{{VNRryWonArA@~BGh>Jda9&{n zc=Q2zDW!ivb)X5`Of0qw_I*c2)I+$!}M5PS9_|WztD!N;h@= zNO9V8q`1~;_z?ASRx;4l#od_r*(Eg&;bwoCsn! zx-IjLoI4Rne*GfE1aI*h7gm3vAIr<^;S#kqL_>0L} z+a=evr^ca?Zre}C8?NE@y9+-J*#AO%;DFxfEP3ir#fnWFo@vx|R8#7o=lS}tUzRNK z=pEa`v*frK^yQ=|L95=P2xT%8`sAL2KUd2b){!?SX~r%l0JTr&FCFKO)ZH6i45;r5 z9FehGXU9$Pt%e8c{-2HG!z*@d^4mF;E^K9<%ShigXQYGI0fTN zBq5bq%=&>rv;pszhf}lWCvXzcYAk4(>npaCwwV0F?o=AIT(_Q*y_qRlVSTF$KSVyp?|WRVuJy>w(^Gap!Sa~0+_xvZgIB5^ZphXQ{p?z=-f-Ov7&|%^3gn9j zm9g{2*L~e*)&S-LNPvx>eTuCRz=V-UxN!4XXLD$XM0mj-dvi3G-D$G0_v|wpeA|<> zDRE^vtAuGP+4T~Tbh)9EcK9;cyShWePLl)DAR0ukutmQ$d*|SD@`a8J0w81o^`@uI zKR>SyMPnOspUz!l>R4@^f@;*d^lB_!8OvGWp_pv>R9r0PGF%Em%ebm%-Bs1qrOwes zMaSN$A67}!^h$S_p-0NpVgeGkGOff40;1&D`zJN!<Zyd=T7sCfRGdK>%qG>K>KbIB?E2* z;C7%v<{fS7^J%`^EaH!9&89{wsV9HZZmlLMU8Q|{^H;^K zmdnTA9w@hQd_PTVZS)c04TwE4C)C?4oIcB-ob4tJZFozoa&aDLaUcAl6}j?VM#^nv zqslFWQ+h{_6|}rK9^l~betq1WL+Fvo_nGjy?11kB~N>5JwYh-2|v{9+RXKM>Kea? 
zZVc?*FU2JZHaxbmCo~vHaE#BblU29=PDd?p=eBw27yRSC_O?47+d9yXo2M__k)g6*!8590&PK11UEGiLr$MEkgyJq)W=APAy@j>2}-)h%aY7*S4}Ro@(-8hT1`5?4^{T>%0;03bDZHcFB!OUwT=3<9Lr3 zlZ@BavFukvs-S@5~Ef2R|+wiMxY-P@?d$v{H+?eJquhrt^8j4eHQXFq)s?NI zbI`^9K*V9sV>+MVw9I_Utis?xMo(#{N8?~tJmk3+H-dQGf2gL^su4T}Y!s*jR;fyO zFB$$?&{9!1FkobOZV@ma+rAMU5=?{cv=lnDhE9totV~6 zN7NKbXmOlV|Mf|j*C=*?_+;)Sc?0rm2kT|Lmk8cPMM1`UwkJ`1B-kb@AnuM3N|2hp zzs!~`&uzuvxf+sdpyZVrkrpxMQMND4DqhMMdMz_5+o@wabwxp2+(K_^93iRpnbMCs7 zotL6AkQv{2dH7wV4ZFtdicQtUHacNEO7}?9?r|yHjg2X7@gdZg;hQnFaAD`Idkpy7 zz{JbX^5`3BBkTQ_)%y=FEp<%k1_`N$rm0IM>6Tn038+_Q$H~Sjy#z1);w`hkofT&H zsr8ZGGwZjA7@03qi1c>?GLf7~tR<3mW%t4l14g^nJN*OG4!8+SWEQDUASGMg9kXpK z&UJq2b=2t_Z};glMw4f^?UhKKV3_G3#H_fVM^wvxN)hq2uxp8f-uK*^rz7dJf;@cU z?$@zj?PE39>^CJ0B4X2(SCTLR`JP#%>LtH8G7nC@$*0ba$@ z*kdT`qk7YWmL)m8xhgGJ;nKq#o=o4PTUR@k2Wt7}Hj@$yTYjaK-&}68BQyf(S0nE9ui@?O2wnmoi|tQjOT(Q8adYoYL$BK&)dEk z5!L;;`Qz=TL}TCwPb;uV_T0$nm>-YJ^&@mHg`9^!gLp>5jXK0PnWX=(v2%}S^8f#T zrRXE9QqJsMy`xg*P(x;`RC-rZDwLSB%Gq+t4oV?n386WplZu?%CbP+~GOQzJ4r8Hm zoH1M5?02ot_w)JQez)82k6(Z3M$L6yuj_euJYTQ-gCiH;U!~7`A&M6H5hoSTBV$I-y1@@ED(v`1YzZQL~!rlg8@0@PG&6hA3j9Aaky0`_XRt_xn zsJrt;MMQ_Q30!}~98+|o3q%mPZEc){YT|m#%g%`RQbYLC=73_7m5Q^9ad8AzTx-9* z7*e94DcAZ5*NPdOBayQw`I_|Ib zd(j-!%p*v<626|j?4e4c3dtyG!s(@Fy zMIdatkrx1(>M^p!QWsoiu7N0D8#!XZ0KX+GZ>r@9J6k2BQqWR9{UiBkF=BMUaIiJC zv!k(b*+_@dv$l8mr&De1b4p7y_*-GWz+nPa+|-Oy9_Jrux=fX7+4T@+KY8vr3Qs$h z|K;yAtX|tmE7gL3cH~1&c_ggck-rTsVZy@cA#zw>@wvQ14(*QpQ)VtkxSg?@v{*8- z3)ngd511e@l?2m9?E`AVj)Apu0XTNZ0(JCam^T0ON5M=^a473MSiOIyB_@V^ixtms zGewz((E6)Qr=^drA{^01Nx>nsnR_S3r-aO-fxYaoBVOFxln{q8=FByfJYKv*rDo0X z6vJDnP4xMi+n!hL)|U8FSJx>miu!Iclchz;%Uz?iJrGV^rK(>eq$_49vlmjr#ul_O zrcQ~GXe?LUrmfQ5C`N~_5^T|mJ?XV;&5mm!2P{ggka3tzBJ2&|O}esroEdvB%slod zXUR1od_z3y*?WpHH9oxh?A#UgonP~YA!ty9S1l!EuE`hvn91BY9^ZF0S0v6q8`~{q zu7SOgH#P_(36i?1Lfcu3!kUS$q=|r@kNJWu_1mjZS2H)##d}lUz7@o33_Jx1I?uK_Sgj(AOWwds>d~v#bKSj8oXko%}#= zqczC09&LWhBf)J@An<~-YP~X&s?X*L&w<<%a}6f7D`OvDCUH8|NPfe zW0?Cv#i$n8(o^kz?(6n}6HZ_8`yoLNwsj;W@Xm>#dydAVF9|HiLi4I^!IX`6?{xzG z=wR!F&+-uQH}hl+_^J^qi=)OOe1Jm)=mF}tE)=1+xhK$B+!=?2>MX2E81jUcIY7#S zLsoof-nF@a(c62~2ux}fonbpY-c3y@75um@5Wk(@7r2JgEU>`U!Q{`ehEe~rD){0> zTWN^3>X+U5acb}5;VQ>&CV=_v9Ja?(hs->-e`C3Qp=epaV@Uf}2VO{KeI97wP{kyF zoENYs*Eqm_Z;qWjXrGp2PsqvdpZZ7|PJ~$pSxvaZ^D*zIUv!_o?cLHedh~BPOzMK5 zR4&eRYc;7G;b4{=PV=@Mv{>Tg>|IU_3%Z&$nC23rdOU^tt|%t-(9fU2r*~7TQ2>HG}m-t6F&X!t{`8LOzdsuoF1HfA>83Ju{C z*(JK=?W0Q%#JClNy!kgin$F4B)Lu)NC2iYm{U7740!==;1PiUkSDO3tb`FDJtfhaM zMu^`E81;LAYE&|E6XWdkI(pJ#1-jC{b#mM!5}5uQo*Xg3Z=P$is*P)C2&LW!_79KL zkrsHo^e>8ySwlnF9;>9y13fpo%*?}?kziHy@4k9Xu#w~W6e01%LB`8h#~Bo zu~b1(+@#0w)d|7R<>e(B>C59I>2qxm?1pR;XCgCMwW~Xi^zMreouu%fCNK7QM2g4q z@IUAF4EH8IGj$y$^$v#)tsLj(tGLl?z_)I|H+352kWHWygGc?k^B|0L62XmXL#~D) zsFzuwBJ1i$Fx*;H3WQoZ+N^x2r}hTzp!nJ?+GbEEQJ#?JtTHZ`x;N&~TNlI=HwON& z;vi#!LAr>EPjQdhPrT(6dBuOf4G~|NdBboH&HC53tD=+x90(%~Hbv}#gf-Do?KVyze7JakyaXvAg?)IVT;LfZ!NG;U3{wl4=x+t|H!u)u&-pg!>!9toSu^QrrYwS^+l(=TQBJDD@wk_lc~z6c!aD<3PEzhh^p z2k+A~BL(r8(iHhyW|A_EJM;10<`}YYZCxz35rVy)RTVzozvh2pV0ExYde14z zY$GCK@7@U>(}A=t!G#K2fXT-|@Cvlw`R<697u`x*t{MsoN2i14W>3;53eE~>jnUod z6KBN~d)?X)Zj|_|6cHz@oi273Uwc0mmwDbR&qRpvHJ04AH%InJ8V4oMrCvU}&!%-= zd90izzB`8cu8qcC8Q!3pm?4JZb6 z6UsNCsA1vqU;rEpee->6N97O8GNprsO$H^_%+M8#ZVq#$Zb@0eD!5z1P*J-k#Ol>( zu#eJ*`ANTtTowa|qZ+Bvr;JiqoJ-^TK95_{9tCw`>DCO(BZBKz%2$tTWi1|`bN}fd zc5}p$Tfote2D#L6p&TY=p-hq)Hk~@F3*C``|c=tF3uGiQCQT zHO;L-&P@q@yq+AU(hIewd1(zE1{B!W%m;0(l&Y+RZ@TT`@$*?=>G1WM1Fc{l-brlueznb#F=z|mMg$Jnx||^DVxa4zb7O4ZQ4(&szBOaaixZdKIG`h9v^^YL3A{~a9CZ8DR#NJXWl@uIWS9x=dVe6eePUs?v;vT%){|t z z%s+(8A(p9S>g1ar%5F-**)2Eya@p{!3HB@9>I|lOb 
z?DoT6t8UEx;$A-vzlefAh$)EfIH)PFwBPQ!f8OZT&gw{~uLYO*ra|_E(n>`hP(46{#&%!}5(5YXWrG503$&Gn*&J2H5W5BhihMoSZNG!bRak}(`Krg z-f_amv{#2}aY@n@j`eed8v?V}S?rP>;i5)cUiufZ&gFycPepq;pFZ6PrMl=vLM2uqN03rpPR3re-c8z z-Q*xh7_IYwZ=LG`k;Uk!S) zz2LvM+IBAp-he3q=qEOEsQ@|GuBBV${!Lm}B2#m=U(A_SP6W67wmAIi2t|$?mJFp> zQ~4P&-(#lb&n`yD<=G|D)l4A0#nL2$Y4VN6wWPU56P1lpfSM9QYpk98TY1A)o*~N6 zXbC-^UD9g7Fh~L#ktE{W(~TCUqj(3Hs|Pt^Bz27?kI3FBKQ5_+%HTxH#eb`>7G@MLqN=0-M$JiOdP zqTWscV|8#+9+<&+@nvh{jv;wTbkBvj*@WSA5ax#iaR0t>m*x7yqGTEWHpV(w4*B-@ z^bT%i<4nt>*V2AY;WU|-nQ#yV&EluVtKk!d<{yx_U(Y(XX;bIcNa!cF`ySmCbS$I@ z>f*ZBjBY);@)rXxv`3(?JLW58ZJBoVe`}UW{u=_Nw|4JEKE5lEmFiV!T z-#_+u#E;UHeo#ORn&d-Q=H%o!Ih6psn}vmiKEphXi~j-2_#<_D$5x$mG;w<9FNtgX z{i{6m*=p0FnZ{W-=W9+E^qj@te(FjAr_?gV<$2h}+^a3qMNWH0qryVvE}`TuLEn+{ zAmEE>RO196FO<22y1PHVihFrHS17(KjbLJiAd;q5|B^$!e$I5I>4d&i%!<23*X zD=UmrRgh-5kCHTM?JToulDo<-G{pnPgMQtGw*8sUUPW*T4<2Y4Buv%BaC-C1UqS)S znL8!UB9K-5T1GpkZX^L9TPFc`vlxov@0u}o%}dP%LB&@bVdvMHe7iNAzz*nhXN9+i zmV{emy1xw@++TsD4H5Gus@e<&uk}=1h-r1ns!5$5?yb1sm(m~9T#@AbQfolS&NET6 znPEoH?fhmuB)8tTj}7rwynm@nay4#XXMU;T)3KftmuU)$8shDX-)CR_Yf09LviZ2m zV_)W2C~hF-Kc^zMZhdkDq5B{1P3qLP|899Wd|0^QKOxj?(E-#3I4d%P@0`=fy!MsNz5`h7sj=*r3hkXWO-|Kw zW&lXgWs<-hi3BUqNV_l8wo>z4UTzu(Z0f2jkNzvWCYd8m+idy=B=-7s7uCdObRTI# zfms`St9eFyzhJW={1ih1|ni(lXD5& z(4+)iYv1K>Iku0H+}fKlAHU({Uurs)4dZ%}mDeTHB;Gh7{OH!K=3u0xe_k&MKyO7b z_mH@?Ofx!wQuD83g05n{N%i%Q8X9!jAw5R*<59K=jeu3>o*9}PtKYN7^X(?Unv#V4 zPF{BBWPK1ZK*KcIbU69ftMWMjYSy>@rY7QEeBy`D*zwK+p?%A6j>1vz5@zw6aGl=p zI)1`Uredt|{5fC;l70?8!_$Q^O8zwvRCTFgqrP=mJ!j!%S@BhxKOaVmQ<&t5fbQio zLWzu@AhQ0-ME2?F>4oU*vDC6~rveQMANZ~g?)R6H2+>Fi2du}Ndwp)#2oN*sOEv>e zpM=5X2KmIq8d3uG0uPkAHP>vYWEkKB#(0eLHAq1-k_IQM%L=DE*@pXZcWEJP%Y-S^ zSgxCffn;vKbVJEOL)yAIn}7Uwb7Nh@W);^N+3Ok0URps44x1QQjvrw0qP8^Jk9qGb+R*&9I8nQj6PAnxGQwfYffCwn0ZI9IpaUwdn9}<9ORbPCV`=}R*YkIxOcd@J`NH?8~ z0R}zLZl0kLt9m|Tr2*YK86>UvLqenzfsD$as<|A0*;6%>?Fk)kGU{g+d9!`p#;v_AoT4FK_(q90iJ4Ze*Q zrsR|WHC_x((V>C?SlUhXXE@m;l9P4{8y*lXa0iGoPY?0)c0;Ykw6`m3I3r~Buf6q+ zsC+lB-3(a?FtN1|#f)q3akVx_^SVPYmY&wwN8l8|1wWe@<`#Yozw6^@^F zHmB5#$$pwIH>u@cctby)f@0ZgBK-RLfHc2)414a7N8K)!-WJ~#qP?@L{=&QWBukMgR@xcA%H;GFS8@zltTuHx`)@}Qb+Q3 z7kkBx+!mhbueeYbk{zNPfkw{Rx5}tQjBapW&H`WrS^j$USi%j?Z{46mi}h5#%3)Vb z?XwjVxG6ty9m3eMa2#(56Z#0Ifnii5I(=(yy5#b*O}AfxU#4xKH?lkQp#*Kjj2I1C z#qUemVnvFck;hNbC#wr~5hnSP6-sK+kZpcz2oEj&c?n$1Y(fbJJa?l}Y2%6&T6O;% zK6uW~tlWmZF_EzdQ`S3iWZN&1!_z|+tNnKF0qtO`U31+2rox|_XX)C;Uy(iHu)m0i z?DP`W=CP4CbuU$aFI0IheRCJwE`0`U*NpTWVtjH0hvLo$B6n@gz2)>bIb;5>y&88e z{A@oxljUes6Yx`Qi%p~v^K-l*_JBaCV0Jt9Kpjo9k`4#O>#OZMyvvYuK3Q4Kz@6RP z5#Wl&j+r9gA(!X1Jq8VUZ>1->@M0uolm(({i_28cn(QIdn6g0|b{8{Y_l)`TYyiqEG_&cj6kA(u^FNx2zx8>}v`HOXn_g-)ls`u0@2 zeMT>$>~C+Zg;*Fg0&q;nf#nqEkigby0>{)emTZqQQbBraszSE~?5tegqO98}(A`#RT{9o$1{6CwIy>r;LS5r)AE#^^Xfx~tX@hxl>Fi?? 
z92KN<03`=d$?RbfPYBc+@KGWh?K?3~$DfTWM$|{>hNmEmfij|C>{*!iuF5Ae+FcbG zbp+}Zvfuu5?4?TjmEFMc-&#CBU6XLU`U}!^hSJxXWgs3Qm+RO>n6!chMpUx-uu4t& zs?KP~{Xgy^xV*)~!p+I5Q|l9+PUA!igkfnru#O+)7Okyg8`4w2Qm%>!=!t_RVSZ~F zuhuUAI>Cp+(RTQWwprcK$y!>Cc!!;a(G?Fc9J5%CG!$7Rb9N=E2&d^;PVC+h;J*u` z{Qg!&EYUXDKqz&fI58vw9g-xj7zjsBQ%{6I>(HLckN zYaBrs4_bw<6TwrLrWv^=0|^Y3gREslIyqAuBU&gD2A12V&N2Z{kta#H}LG?pgzA0D=tvosf| zgGRIp-1ofWXzHp56-8?;%ZBBc)=;cy;RXoZJX2=-YMUtrE*$~!S~Z>PW$Y+j2kYgv zbg;GUk4?Q}R7{WrXEZ!#4=k6d@DR(c(CDY37|{ZNqy>C@E{+nJVJdyYCgpuPA0rdr zl#+o;W)5@R$L1XhRPteS-%tM@SQeLC z5JFq~_To7hY4OP0trATVQsb`GXW%6nU7I-&gp%2u=7M%v7@_Cw--Q-3#SQPM*tqJB zCq{iQ95}o#ViGit~Uo%0E~+^bgVZO%+BRDQeN4 z!kbAqBy$9&UVK%L4IPU-83KP>=NRbq1MhD;hfSv*Cvs?+D1RKMNQS_j`@USl7^}D% z*t@UdA&TwX0b3F;F&de-(PnH1yeeK0V(l$EF0?ml`kyFr{awSME%i z2s&U=TNT0a+hMO*!K1=1$Z6@vrkS^sQ!1ebWIm>OsSU7!4kls38VyD=RVtPSPmg4hMDDkNn8gDd&N36yeW{4^%K1TNVn}eITRMlD;;%D{?qRYH{0P z&g#Xuu=`-N`_5MHo04l@6I96zUEO-X8|1>Wn}u=2#1<7@^~Of|1hu$ym6F{|qixO2 z91gTm-&+vux+3!0uxNiI?!v&50QkLDOOE<4u69@l9qbW8B%I9nbN zdVyho-LDKJS;sQs?_)qFb0`77m%d4yj7Vvp90WML!o1oqr|i0x(>j%00ShunFia=l zSm$&uu#Hb&(R)dP%}MdtQeTuS6{6G4m;EUdioH|2j86d59qqj_=Wrhns^o@yKCO+E zOyG@yyi6q^h}EA9B%#Vt6A~Ba5Ex&Ohd<{&!_gDwz7HiRIY%}IZnrYQnzb^l2IK;UmeCSS&}k*?OBpsKPYWbUrsM9quL8>~ zU0Z$j;ZBbL{bz!a3f~g3Zit^5+jnzArL}9^c!sQ{xc7+vR|^}-2WRX}LfG3YkFKF# zgySR}C_iY8Qu$wGhqnN^c)Dt()I6}SJ;?>zSNl^Lblh(slK*J2T|c~ji*osvZiF12 zaJmDTb7Yq~!PyG^r@c`L@y>Sf)yyPO#`X~3X6J$|!g{BkO7J-86Z^GedDh8H>7&5g zg|CMEcl$&u;cofRTq|yRuVBdbBb01b{L2*XSCJDp|G|V0^hzI-yVZE2+v$>}b!Oyd zqo?zZ23a*}rIxr`{xmlQYdvD7VWQu!SjTkGX8*6TJY<-I&GKI;Av=iks`QUg{NPxC z-!}yQi+lpNtgSO?O+cP=C7Gk$ueH;p@q=!1dsSVSv5)D!ri>$ zpS|`P$Irl$eKwm&5Z`y=u7MTh&AA=ehEa_am4a<|2xnW*Z;;*_uev0SPM;9pU^d^N z-BOnyN%S$KJ&N?izT|~gH*74cS(vH*3v`y!7_K+4!IcCFvg6=*r zqzj$v^;&jmk3Q0_mwcIzCCsYKki@#?Xj)f5Bx%inFy0nh5+QM$GxjqiT?yk#H2u#&B{`2U|NAiso(Tzdfa5)F1Q3v|j1s0SHl?@C-?#7W?@z;t^KkEOYkQ@Z z6<>b^a|4dEKdp_=Is=C&w);Q%@i4gZ-8MYTg<9{Y0!Eu>+K%fa>_4ZI{9|gW^$8vh z2so~ND|WA4_4Vslg{I+X5V=|dHh17;zWGWIV#vGgd6#a|HS9XO!>?S~IovVRAYYc$ z1a|3l7TRh8T8br%RVQd{Y~*)ZvA941*_Hxd*wR^o00V9VW}E_mA^vYkWEN!Ppr1;!gNn>Z%T48{n~ZNDWFW?rIxIAsG3s1zfB*f#=H zg-7Gf?tSnN*{AQE@>V=f;6DM&)&jy{o5JD2!?1mk7&e$zR)X!XWN~K_l{A_GdHAk? 
zg#atM>J;mypH9S8X1T}N)#)iIXalv`u}=>7^zj)w_PVJ@5qr*d7%6jRVwpP4kyfD<DnBQ?>Ej7ZBPFsd#{qsVXdlUz&zm%Q9PMP zlLknE1^))f5d(_k^T!-RA=s1);)d60o3*adzLXd*7#~SNNVi}GgyBB(P(Wpb9=7}a z<4I`r5YQiHLNGs15bsQS49#mL5YSC=jS3kjLScBpXjvaN|NXOULAYtPuR zN!4cLmgZa*9^DEJ4nVn^0q>ix8CB#<;Tg_07uuqxcz*7rsIlXP7$H0ETe(+263lh9 zhShMPE|AUWp#E|n5ds^xzE#GK7Xl>cl zb!h1+$6MLAtSIJT)ATHG3W*3%Sg&@>-8-m9rw+cGR17ebF$V9>A5=g`&u#tp0Kspq z644nws!6oH0*g#sqwLZ8S6Xgp$i|MqCyMr94iMmalY4_$gW5Vj-n@A; z0!XK1_31u04)9L-BmioudSkF~iXA7H42PHVb<}E+WCRdAg>VtfX4m9Qs-#Nc!~W9$ z-yyMmy>hTxO`nMetOi0Pf7oG>m`=!!@?I_Ew`ZNyeOSZZ;mo_2ne0|mqiaIAAGP<4 z#zF)z5#UIC>Q1{7)ICckEmm`KU+o(LOQP+7sp-_TWoH5~unSgdDzALop;6q-;T?v= z5P(H#Twd?nrka3JjHr+Q8&d7B=5x87`o(>pmNTVu#oZ_Awc*wp?W<44lQ3&F_I1?)~Kx=)oN&+KS&eej>aIRsw*uIR*uIGb9r-K9tAMMnIv2 z0C_0IGp(05Jul=fPm;YZtzj_v-s||hvEPx|Zk2RK^BvX+;C}np|6k?h-v343|G#TR gMJlUxULi+7``E=BH!NUQ0Dt~Da_Vs9A@7_22gnJiBLDyZ literal 0 HcmV?d00001 diff --git a/papers/atharva_rasane/00_myst_template/Results.csv b/papers/atharva_rasane/00_myst_template/Results.csv new file mode 100644 index 0000000000..d20d5a7096 --- /dev/null +++ b/papers/atharva_rasane/00_myst_template/Results.csv @@ -0,0 +1,4001 @@ +,Highest Ratio,Average Others,T-Statistic,P-Value,Label +0,0.23333333333333334,0.1822033898305085,-3.53275826407369,0.038562976693981454,Original +1,0.2033898305084746,0.1391949152542373,-3.4405910948750495,0.04121820653114378,Original +2,0.3389830508474576,0.27033898305084747,-2.228607614649941,0.11214158967770235,Original +3,0.2542372881355932,0.16836158192090395,-2.451612903225806,0.2465587655124727,Original +4,0.288135593220339,0.21087570621468926,-5.467540160267347,0.012025943288987453,Original +5,0.22033898305084745,0.1307909604519774,-11.145126479863883,0.0015479966208348658,Original +6,0.22033898305084745,0.1603813559322034,-4.047402698396378,0.027156785257683596,Original +7,0.3,0.22033898305084745,-11.51260179108094,0.0014069485474090153,Original +8,0.23728813559322035,0.18149717514124292,-3.7964977175244834,0.03208088709594881,Original +9,0.23728813559322035,0.16871468926553673,-3.7922455055393622,0.03217383630567124,Original +10,0.23333333333333334,0.1398305084745763,-4.975896705378727,0.015597602000975219,Original +11,0.23728813559322035,0.20261299435028246,-3.3817063110386885,0.04303714816975945,Original +12,0.288135593220339,0.18177966101694915,-3.365869501933496,0.04354367094755919,Original +13,0.22033898305084745,0.1477401129943503,-3.4073375272246085,0.04223311481214282,Original +14,0.4067796610169492,0.27838983050847455,-2.9307183932115923,0.06096526759447833,Original +15,0.16666666666666666,0.1228813559322034,-10.333333333333332,0.001933293191806968,Original +16,0.3050847457627119,0.21101694915254238,-4.384236405710172,0.02197310950253267,Original +17,0.2033898305084746,0.14745762711864407,-2.9054879908745583,0.062224127599699926,Original +18,0.23728813559322035,0.21087570621468926,-2.5909821905688375,0.08100515899541934,Original +19,0.21666666666666667,0.1440677966101695,-2.967580383634676,0.05918282683371976,Original +20,0.4067796610169492,0.3206920903954802,-4.689814564762172,0.018328326818943686,Original +21,0.2542372881355932,0.13940677966101694,-5.09630233956434,0.014606958299961344,Original +22,0.23333333333333334,0.11440677966101695,-6.328859555783819,0.007975829484392977,Original +23,0.23728813559322035,0.1896892655367232,-2.202614379084967,0.1148909616099501,Original +24,0.22033898305084745,0.12662429378531073,-5.286516953753678,0.013202833807875401,Original 
+25,0.3,0.1864406779661017,-2.9476070119292004,0.060140398566708664,Original +26,0.3898305084745763,0.2657485875706215,-7.979030913275075,0.004107637499059575,Original +27,0.1864406779661017,0.14336158192090395,-3.0162558762483083,0.05692805669660707,Original +28,0.21666666666666667,0.1271186440677966,-8.184904804985008,0.0038156709515571787,Original +29,0.23333333333333334,0.1822033898305085,-4.794807132575457,0.017258873277490094,Original +30,0.2711864406779661,0.13516949152542374,-9.58743044198646,0.002407749109248065,Original +31,0.2033898305084746,0.16454802259887005,-3.6666666666666683,0.03508151471548188,Original +32,0.1864406779661017,0.15176553672316384,-1.9438723809014464,0.14715294859631137,Original +33,0.2033898305084746,0.15176553672316384,-2.698151855052503,0.0739016273969179,Original +34,0.1016949152542373,0.06666666666666667,,,Original +35,0.1694915254237288,0.14058380414312618,-2.4173228346456677,0.13686029083311824,Original +36,0.3050847457627119,0.1519774011299435,-5.686705315838459,0.010777981645506028,Original +37,0.25,0.17372881355932202,-2.3302720008113575,0.10212247896202177,Original +38,0.2711864406779661,0.21949152542372882,-5.019825255742886,0.015226316305783671,Original +39,0.2711864406779661,0.1602401129943503,-3.8619097169864767,0.030693492269553303,Original +40,0.288135593220339,0.23596986817325802,-39.57142857142854,0.0006380001300463167,Original +41,0.1694915254237288,0.12238700564971752,-4.38578568651365,0.02195236472320489,Original +42,0.2542372881355932,0.1519774011299435,-5.530747598736873,0.011647446932010377,Original +43,0.21666666666666667,0.15254237288135594,-5.350441310978211,0.012770724119522098,Original +44,0.22033898305084745,0.14357344632768362,-5.128214329323895,0.01435823217533278,Original +45,0.23728813559322035,0.1646186440677966,-4.953014798968853,0.015795677695098962,Original +46,0.3050847457627119,0.17323446327683617,-5.3345252289586895,0.012876563862984138,Original +47,0.23728813559322035,0.1643361581920904,-3.2248357074853122,0.04840566051832893,Original +48,0.23728813559322035,0.16871468926553673,-3.011507892829531,0.05714319479454041,Original +49,0.3050847457627119,0.1646186440677966,-16.597491007684166,0.0004760985758523895,Original +50,0.3,0.16525423728813557,-6.7289971752910285,0.0067007729656368455,Original +51,0.2033898305084746,0.13940677966101697,-4.870967741935483,0.016533426116271753,Original +52,0.2542372881355932,0.1689265536723164,-3.6720208922977697,0.03495083324055868,Original +53,0.22033898305084745,0.15586158192090396,-2.738286769182844,0.07144110545918902,Original +54,0.22033898305084745,0.14350282485875704,-3.588902734990965,0.03705188832887151,Original +55,0.3050847457627119,0.18983050847457625,-4.577628510425044,0.01956818745991966,Original +56,0.2711864406779661,0.1853813559322034,-3.734927184999753,0.033461118399696864,Original +57,0.2542372881355932,0.21346516007532956,-7.919995572991999,0.015570889550764348,Original +58,0.23333333333333334,0.17796610169491528,-3.7720217587055536,0.03262066446770594,Original +59,0.3389830508474576,0.1815677966101695,-10.554502580376617,0.0018167602089980005,Original +60,0.2711864406779661,0.1605225988700565,-5.3922713771638495,0.012497927330704648,Original +61,0.288135593220339,0.19837570621468928,-2.935710690049308,0.06071996824652531,Original +62,0.13559322033898305,0.11789077212806026,-1.9366012620612738,0.19241125153029964,Original +63,0.3050847457627119,0.19004237288135595,-4.661502359215338,0.01863137464464403,Original 
+64,0.2711864406779661,0.1730225988700565,-4.393369811625047,0.021851178700442182,Original +65,0.26666666666666666,0.1822033898305085,-3.3377105216719656,0.04446314563719635,Original +66,0.22033898305084745,0.13509887005649718,-6.972220994378161,0.0060547347831298665,Original +67,0.22033898305084745,0.15607344632768363,-4.4996604055507525,0.020494569188795435,Original +68,0.2542372881355932,0.13905367231638419,-7.532784229621696,0.004849699633498134,Original +69,0.3050847457627119,0.2234463276836158,-3.191276519463654,0.049662470752465,Original +70,0.23728813559322035,0.17330508474576273,-2.1914765988605094,0.11609378562161582,Original +71,0.2542372881355932,0.19004237288135595,-3.5786014890819224,0.03732340310992795,Original +72,0.2542372881355932,0.17309322033898306,-4.574785584853341,0.019601000667008463,Original +73,0.16666666666666666,0.10593220338983052,-2.315042178850111,0.10355228938029219,Original +74,0.2711864406779661,0.21101694915254235,-5.377347857529729,0.01259437003534445,Original +75,0.23728813559322035,0.17704802259887006,-4.366099615611315,0.022217857702086435,Original +76,0.1864406779661017,0.1573446327683616,-4.756098094357983,0.04147677166903169,Original +77,0.22033898305084745,0.17309322033898306,-4.258383219097977,0.023746522626264043,Original +78,0.23728813559322035,0.14745762711864407,-2.850671138558804,0.06507315235014592,Original +79,0.22033898305084745,0.16883239171374767,-1.940875951377627,0.19179259810170837,Original +80,0.25,0.13983050847457626,-13.57805716454443,0.0008640560542882232,Original +81,0.23728813559322035,0.1687853107344633,-4.943577756944967,0.01587832283622757,Original +82,0.25,0.19491525423728814,-3.7527767497325675,0.03305327992358387,Original +83,0.2711864406779661,0.1853813559322034,-3.734927184999753,0.033461118399696864,Original +84,0.288135593220339,0.14336158192090395,-9.12238026954469,0.002784009395450553,Original +85,0.2033898305084746,0.15600282485875705,-4.763659834348825,0.017567478307811073,Original +86,0.23728813559322035,0.1515065913370998,-4.591824862480486,0.044299673534495966,Original +87,0.1864406779661017,0.1096045197740113,-9.714285714285717,0.002316933797952584,Original +88,0.288135593220339,0.2490819209039548,-3.390087084881917,0.04277212562874923,Original +89,0.288135593220339,0.19858757062146892,-2.978265932915313,0.058678380877150695,Original +90,0.2542372881355932,0.2024482109227872,-2.539664030967854,0.12632350193838743,Original +91,0.3333333333333333,0.23728813559322035,-8.01387685344754,0.004056193290243036,Original +92,0.23728813559322035,0.1434322033898305,-5.819049164593993,0.01010570903664765,Original +93,0.288135593220339,0.20218926553672317,-2.735771061149155,0.07159231928803704,Original +94,0.23728813559322035,0.13919491525423727,-4.655912421566584,0.018691975738082102,Original +95,0.2033898305084746,0.14766949152542372,-3.1966382474552573,0.04945892607281697,Original +96,0.25,0.13559322033898305,-16.53405576378645,0.00048155159575766156,Original +97,0.1864406779661017,0.13509887005649718,-2.456740106111629,0.09113124582704853,Original +98,0.2033898305084746,0.15635593220338984,-2.448992878796489,0.09176170732285532,Original +99,0.2711864406779661,0.17295197740112994,-12.697505573117574,0.0010536643393062766,Original +100,0.23728813559322035,0.18545197740112995,-2.597095416447633,0.08057786815687772,Original +101,0.2711864406779661,0.1602401129943503,-7.0060661223464,0.00597143776668511,Original +102,0.22033898305084745,0.16871468926553673,-3.3950093870826388,0.042617435426627326,Original 
+103,0.2033898305084746,0.1478813559322034,-2.899678131794266,0.06251860149004074,Original +104,0.13559322033898305,0.10131826741996235,-1.9782608695652164,0.1864941692932114,Original +105,0.2033898305084746,0.15628531073446328,-3.1594347385098827,0.05089331398223453,Original +106,0.3559322033898305,0.29964689265536726,-10.350649350649334,0.001923817806020011,Original +107,0.2033898305084746,0.14766949152542372,-5.257547050264664,0.013404952501338205,Original +108,0.22033898305084745,0.15190677966101696,-2.742051411140234,0.07121556090757529,Original +109,0.3333333333333333,0.17796610169491528,-7.701540462154052,0.004549748975956458,Original +110,0.23728813559322035,0.10557909604519773,-7.476466358952792,0.004955591570430506,Original +111,0.2033898305084746,0.11843220338983051,-3.149160708078649,0.05129865697051939,Original +112,0.23728813559322035,0.1602401129943503,-2.951009970239908,0.05997588776618918,Original +113,0.2033898305084746,0.18559322033898307,-2.848958479370646,0.06516476187287569,Original +114,0.1694915254237288,0.11798493408662901,-5.251610061723054,0.034398946176199485,Original +115,0.4,0.24152542372881358,-10.949598818482546,0.001630748987105004,Original +116,0.2542372881355932,0.15593220338983052,-6.457745685519285,0.007532728000207892,Original +117,0.1864406779661017,0.13926553672316386,-5.731425162505742,0.010544436415387572,Original +118,0.1864406779661017,0.16299435028248588,-3.608695652173914,0.06894253641177729,Original +119,0.3220338983050847,0.1518361581920904,-9.402305491422489,0.0025489364534890947,Original +120,0.25,0.211864406779661,-7.794228634059958,0.004395375691816533,Original +121,0.26666666666666666,0.24152542372881355,-3.098582276011423,0.05335457237433866,Original +122,0.3,0.24152542372881355,-3.1118145559317116,0.052806824094954664,Original +123,0.23333333333333334,0.17372881355932202,-2.8133333333333344,0.0671073341823401,Original +124,0.288135593220339,0.17274011299435027,-4.913402497837348,0.016146391435118687,Original +125,0.1694915254237288,0.13926553672316386,-3.6722304933420005,0.03494573014722615,Original +126,0.2711864406779661,0.21497175141242938,-2.7808379940637797,0.06894254556283926,Original +127,0.2542372881355932,0.21115819209039546,-2.8047829882173874,0.06758426686914822,Original +128,0.23333333333333334,0.19067796610169493,-5.2571452098620695,0.01340778439483409,Original +129,0.2033898305084746,0.11822033898305086,-6.986639340848923,0.0060190635058901916,Original +130,0.2033898305084746,0.16045197740112996,-3.771816669089535,0.03262523637568915,Original +131,0.2033898305084746,0.1605225988700565,-2.828645932579055,0.0662636402779568,Original +132,0.15254237288135594,0.10103578154425613,-3.0973237391013955,0.0903361791294822,Original +133,0.3050847457627119,0.1434322033898305,-9.210615078582297,0.0027069229317899495,Original +134,0.288135593220339,0.1896186440677966,-4.894736842105265,0.016315166384743927,Original +135,0.3559322033898305,0.23213276836158192,-5.049357722281286,0.014983160961383876,Original +136,0.2542372881355932,0.2236581920903955,-3.6741121386261355,0.03489996095058197,Original +137,0.3050847457627119,0.2531779661016949,-2.8305893057056326,0.06615751198285083,Original +138,0.2542372881355932,0.23587570621468926,-2.137186834969644,0.16604961117054062,Original +139,0.18333333333333332,0.11864406779661017,-3.5335467141319046,0.038541217308166585,Original +140,0.2033898305084746,0.12662429378531073,-5.193353369212653,0.013867428943147485,Original 
+141,0.2542372881355932,0.1434322033898305,-6.313436023720237,0.00803112776427947,Original +142,0.2542372881355932,0.15600282485875705,-3.541825936051609,0.03831367266160501,Original +143,0.3050847457627119,0.16440677966101697,-8.34181386665146,0.00361131585696273,Original +144,0.2033898305084746,0.1348870056497175,-6.050693757052515,0.009055606464007803,Original +145,0.22033898305084745,0.18135593220338983,-5.276561879022918,0.01327183724912819,Original +146,0.26666666666666666,0.17796610169491525,-5.028024029479735,0.015158299248004988,Original +147,0.26666666666666666,0.1822033898305085,-7.92070349524896,0.004195693967370819,Original +148,0.23728813559322035,0.18545197740112995,-2.9798032437751925,0.05860625147640999,Original +149,0.3728813559322034,0.26958568738229755,-2.957002218815846,0.09786519767422906,Original +150,0.23333333333333334,0.1652542372881356,-3.6229338549736347,0.03617265345530896,Original +151,0.2711864406779661,0.22796610169491527,-2.846542418148333,0.06529426992156556,Original +152,0.2711864406779661,0.14774011299435028,-4.041966945913288,0.02725273967177257,Original +153,0.2542372881355932,0.19406779661016949,-3.4332517325533063,0.041439515376910444,Original +154,0.2711864406779661,0.14738700564971752,-4.874543567127261,0.016500349284977168,Original +155,0.23333333333333334,0.211864406779661,-4.387862045841163,0.021924602127579518,Original +156,0.23728813559322035,0.1796610169491525,-5.666666666666667,0.029758752589905717,Original +157,0.23728813559322035,0.19830508474576272,-3.682947537517003,0.034686070852458215,Original +158,0.22033898305084745,0.15160075329566855,-4.166330062408052,0.05306537932277536,Original +159,0.23728813559322035,0.19392655367231637,-2.224345699469886,0.11258691448891019,Original +160,0.1694915254237288,0.1056497175141243,-4.336541993961348,0.022624336731357778,Original +161,0.23728813559322035,0.1797551789077213,-3.8907727779580643,0.060159036553398035,Original +162,0.2542372881355932,0.18968926553672316,-2.986928104575163,0.058273407134112075,Original +163,0.22033898305084745,0.14048964218455745,-3.229591652487888,0.08397470085153524,Original +164,0.1694915254237288,0.1391949152542373,-3.8838243353571547,0.03024595201311427,Original +165,0.2542372881355932,0.16885593220338982,-3.6939328104988762,0.03442246894212467,Original +166,0.2542372881355932,0.20268361581920902,-3.2230287355249403,0.04847230125929893,Original +167,0.2542372881355932,0.22201351933310776,-3.1365710180077406,0.0518009301559042,Original +168,0.26666666666666666,0.21610169491525424,-3.493722261155749,0.0396601427679115,Original +169,0.23728813559322035,0.1910546139359699,-7.418137270026101,0.017691506692045566,Original +170,0.3050847457627119,0.23192090395480225,-3.3773352617852765,0.043176200293171145,Original +171,0.2033898305084746,0.1432909604519774,-1.8928833055825962,0.15471358909740393,Original +172,0.2033898305084746,0.1290018832391714,-3.8230779561170367,0.06211218967841154,Original +173,0.3050847457627119,0.2193502824858757,-4.75599598618577,0.017644508859181555,Original +174,0.2033898305084746,0.1601694915254237,-2.0803333919424123,0.12896148504661395,Original +175,0.288135593220339,0.1901129943502825,-3.182414988821109,0.05000120219940348,Original +176,0.3389830508474576,0.2531779661016949,-6.182185493474629,0.008522718517249426,Original +177,0.3559322033898305,0.2658898305084746,-4.202979499690264,0.024585862477064943,Original +178,0.288135593220339,0.2192090395480226,-3.4729797480883895,0.040259338911251656,Original 
+179,0.3050847457627119,0.19399717514124296,-7.840633887955942,0.004320681154632681,Original +180,0.288135593220339,0.19413841807909604,-5.2860346002987235,0.013206166418066021,Original +181,0.2711864406779661,0.2078154425612053,-2.134529747722321,0.16636576065135147,Original +182,0.2711864406779661,0.22372881355932203,-3.59486813709167,0.036895807895617604,Original +183,0.3220338983050847,0.12245762711864407,-7.84890994931086,0.004307536410571643,Original +184,0.2833333333333333,0.21610169491525422,-2.401102376173316,0.09577949808406833,Original +185,0.2542372881355932,0.20254237288135596,-7.415534221028932,0.005073622594306211,Original +186,0.2711864406779661,0.1772598870056497,-6.333333333333335,0.007959883216421762,Original +187,0.18333333333333332,0.15677966101694915,-6.26666666666667,0.00820192086968827,Original +188,0.2542372881355932,0.19413841807909604,-5.382088936904238,0.012563625893657768,Original +189,0.2711864406779661,0.23545197740112994,-1.9022556390977454,0.30811702486531156,Original +190,0.35,0.2754237288135593,-9.191300234460838,0.0027235534322141448,Original +191,0.26666666666666666,0.20338983050847456,-2.536300556483895,0.08495405146875473,Original +192,0.23728813559322035,0.1307909604519774,-10.043517801177494,0.002101488368509634,Original +193,0.22033898305084745,0.14766949152542375,-6.856800905858473,0.006350573689746284,Original +194,0.288135593220339,0.16871468926553673,-7.1482687865653824,0.005637501684561561,Original +195,0.23333333333333334,0.1483050847457627,-2.948006683383839,0.06012104778921462,Original +196,0.26666666666666666,0.14830508474576273,-4.1037036556074025,0.02618807381282589,Original +197,0.22033898305084745,0.16885593220338985,-6.912825719494809,0.0062046516135269335,Original +198,0.21666666666666667,0.1694915254237288,-3.936227748605116,0.02920926328962092,Original +199,0.288135593220339,0.19858757062146892,-3.923159163483533,0.029463468310785026,Original +200,0.4745762711864407,0.3416666666666667,-7.374780272477125,0.0051546431499948815,Original +201,0.4406779661016949,0.346045197740113,-6.302829818170098,0.008069446993235066,Original +202,0.5423728813559322,0.38834745762711864,-7.130307147698573,0.005678300191929151,Original +203,0.4576271186440678,0.36292372881355933,-3.837369345431411,0.031204798309479358,Original +204,0.4745762711864407,0.37542372881355934,-7.414573731136387,0.005075512686812505,Original +205,0.43333333333333335,0.3686440677966102,-4.46962233410428,0.020866618588713408,Original +206,0.4666666666666667,0.3601694915254237,-7.358286550031938,0.005187918406325382,Original +207,0.4745762711864407,0.3504237288135593,-6.6711622996447275,0.006867611409633449,Original +208,0.3898305084745763,0.3543785310734463,-3.127498225142409,0.05216674452503871,Original +209,0.3898305084745763,0.3331920903954802,-1.7611959878594572,0.1764230443495778,Original +210,0.5254237288135594,0.4387005649717514,-3.5958734600175015,0.03686958706248058,Original +211,0.4576271186440678,0.3257062146892655,-6.924029541793238,0.02022773333106733,Original +212,0.4576271186440678,0.3334745762711865,-4.989086378646126,0.015484895041677766,Original +213,0.4406779661016949,0.29555084745762716,-4.560758399398884,0.019763952369741263,Original +214,0.4745762711864407,0.37570621468926557,-3.448652473575024,0.04097685603288608,Original +215,0.4406779661016949,0.388135593220339,-4.65582342119235,0.018692942668843485,Original +216,0.4406779661016949,0.37125706214689264,-4.6417007257391365,0.018847202898111523,Original 
+217,0.4666666666666667,0.3008474576271187,-3.3764082011477075,0.04320576532803089,Original +218,0.4745762711864407,0.3799435028248587,-2.299434047232845,0.10504286102353275,Original +219,0.4745762711864407,0.3674435028248587,-3.575513903469711,0.03740527845126834,Original +220,0.4745762711864407,0.4132768361581921,-2.677380542667445,0.07521629738306508,Original +221,0.5166666666666667,0.3559322033898305,-8.779860612843027,0.0031123529183814187,Original +222,0.4406779661016949,0.38210922787193974,-2.7837850452128095,0.10845055325071953,Original +223,0.4915254237288136,0.35812146892655367,-2.5160267825812963,0.08647807452830744,Original +224,0.4166666666666667,0.3601694915254237,-3.233808333817773,0.0480764627944047,Original +225,0.4406779661016949,0.36694915254237287,-2.474962294633946,0.08966910755373639,Original +226,0.4745762711864407,0.3294491525423729,-5.302012165253797,0.013096358284056045,Original +227,0.4406779661016949,0.3333333333333333,-4.82600482600724,0.016956798300543117,Original +228,0.4666666666666667,0.3177966101694915,-7.026666666666668,0.0059214753537602605,Original +229,0.5084745762711864,0.379590395480226,-4.111518045317934,0.02605716031917639,Original +230,0.4576271186440678,0.41007532956685494,-4.605203601134296,0.04405939035816047,Original +231,0.5084745762711864,0.40127118644067794,-2.4156666462852487,0.09453530050793599,Original +232,0.4666666666666667,0.3686440677966102,-3.612819691689752,0.036431155449573864,Original +233,0.5254237288135594,0.36299435028248583,-4.890297438969469,0.016355645150575224,Original +234,0.4406779661016949,0.3626412429378531,-2.3461567951496813,0.1006566092112587,Original +235,0.4915254237288136,0.41035781544256117,-3.6256912791125826,0.06836231728156876,Original +236,0.6779661016949152,0.413135593220339,-6.782402329135958,0.006551418920014316,Original +237,0.5932203389830508,0.40091807909604515,-7.503655577513371,0.004904092057273042,Original +238,0.423728813559322,0.3500706214689266,-4.153796413823909,0.025363321264457474,Original +239,0.5254237288135594,0.37645951035781544,-9.627032312606932,0.010618294541056795,Original +240,0.3728813559322034,0.307909604519774,-2.524577979762879,0.08583120198777253,Original +241,0.5,0.423728813559322,-2.5287816912705736,0.08551538240691227,Original +242,0.4576271186440678,0.32478813559322034,-11.455677986899559,0.0014276482471677783,Original +243,0.43333333333333335,0.3050847457627119,-5.140537735016466,0.01426366105655198,Original +244,0.43333333333333335,0.3813559322033898,-1.4070831677394464,0.25411009793550343,Original +245,0.4576271186440678,0.32083333333333336,-3.313513183532759,0.045273022622265666,Original +246,0.4406779661016949,0.35056497175141244,-2.7353018723026903,0.07162056517888178,Original +247,0.4576271186440678,0.3629943502824859,-5.564047150200574,0.011454300879796521,Original +248,0.5084745762711864,0.3461158192090395,-16.539568345323744,0.0004810744367490006,Original +249,0.3898305084745763,0.3501412429378531,-2.5736529074083476,0.08223167628896547,Original +250,0.4576271186440678,0.354590395480226,-4.788344485301914,0.01732232163571842,Original +251,0.43333333333333335,0.3728813559322034,-2.9121711386422233,0.061887533056045364,Original +252,0.4745762711864407,0.3759887005649718,-2.087127965330441,0.17214627140969266,Original +253,0.5333333333333333,0.3898305084745763,-5.7519673334545445,0.010439377744934933,Original +254,0.4406779661016949,0.35882768361581924,-4.431514865270233,0.021351308327048055,Original 
+255,0.45,0.35169491525423735,-2.667077795032928,0.07587912763738508,Original +256,0.4,0.32627118644067793,-2.8103535287436263,0.06727306836232257,Original +257,0.4406779661016949,0.3484934086629002,-4.258091646437124,0.0509728994156799,Original +258,0.4576271186440678,0.3923022598870056,-4.900769721140662,0.01626036695364969,Original +259,0.3898305084745763,0.28700564971751413,-6.458938312987853,0.007528780847255386,Original +260,0.423728813559322,0.3671610169491525,-3.7775875209558483,0.032496904050512185,Original +261,0.4745762711864407,0.40084745762711865,-5.117647058823531,0.014439978840531617,Original +262,0.4576271186440678,0.31631355932203387,-4.390851056341401,0.021884716395917995,Original +263,0.3333333333333333,0.2711864406779661,-4.016632088371216,0.02770576700479268,Original +264,0.4406779661016949,0.3540489642184557,-3.260853543038344,0.08256523104344153,Original +265,0.4576271186440678,0.37563559322033896,-4.895597481406331,0.016307334114510047,Original +266,0.5084745762711864,0.39682203389830506,-4.774692154738101,0.017457356531658943,Original +267,0.5166666666666667,0.3771186440677966,-6.9688146846829495,0.00606320270460328,Original +268,0.4745762711864407,0.40480225988700563,-2.244619924618763,0.15392599376576382,Original +269,0.4067796610169492,0.33785310734463275,-2.752891462679799,0.07057104088631745,Original +270,0.45,0.32627118644067793,-6.584419205304784,0.007128240934046268,Original +271,0.4067796610169492,0.33340395480225987,-2.392200090989989,0.09654983618738305,Original +272,0.4067796610169492,0.3500706214689266,-2.147266074887937,0.12101972629670502,Original +273,0.4915254237288136,0.39896421845574387,-4.392202321683557,0.048125248972546525,Original +274,0.4067796610169492,0.363135593220339,-2.037441371070689,0.1343771965437019,Original +275,0.48333333333333334,0.3601694915254237,-4.043803171028667,0.027220277010713455,Original +276,0.4067796610169492,0.32916666666666666,-4.334673395356122,0.0226503559908631,Original +277,0.4166666666666667,0.37711864406779666,-1.9749677244040724,0.14275464845123928,Original +278,0.5254237288135594,0.4472457627118644,-4.483251319120438,0.020696735526511162,Original +279,0.4915254237288136,0.346045197740113,-5.143574545220902,0.014240481118726733,Original +280,0.4576271186440678,0.29971751412429376,-8.707841203347302,0.0031878312735672807,Original +281,0.4745762711864407,0.3206920903954802,-6.159177470925122,0.008612932023026598,Original +282,0.3898305084745763,0.3248587570621469,-5.276561879022925,0.01327183724912815,Original +283,0.4406779661016949,0.34597457627118644,-4.470968814876207,0.020849754679967962,Original +284,0.5084745762711864,0.34590395480225994,-3.714817178281528,0.033928358727363876,Original +285,0.4576271186440678,0.3923728813559322,-2.707175832648326,0.07333935451425552,Original +286,0.3898305084745763,0.3082627118644068,-3.1270512783828903,0.052184849438917776,Original +287,0.423728813559322,0.37617702448210927,-2.1147485270709563,0.16874539562177085,Original +288,0.5423728813559322,0.36292372881355933,-5.215987959681293,0.013702017252772999,Original +289,0.4576271186440678,0.3816384180790961,-2.042749961024093,0.17780819467640524,Original +290,0.43333333333333335,0.3559322033898305,-2.3848638865930605,0.09719033554345949,Original +291,0.4067796610169492,0.3163841807909605,-13.063945294843638,0.0009686387721898685,Original +292,0.3898305084745763,0.346045197740113,-7.51860437612632,0.0048760779327184315,Original +293,0.4576271186440678,0.35,-5.823074704906403,0.010086133108957392,Original 
+294,0.5084745762711864,0.4718455743879473,-2.3238483653527293,0.145752005957584,Original +295,0.423728813559322,0.38747645951035786,-2.48387096774193,0.13098307174005927,Original +296,0.4915254237288136,0.3967514124293785,-6.064766311184656,0.008996522307758008,Original +297,0.4406779661016949,0.3672316384180791,-3.2003787654626485,0.04931755050578723,Original +298,0.423728813559322,0.3670197740112995,-2.739338279816391,0.07137801893519374,Original +299,0.4745762711864407,0.3377118644067797,-8.333345166769199,0.0036219707869269407,Original +300,0.4166666666666667,0.3432203389830508,-3.466666666666669,0.04044399231953359,Original +301,0.4745762711864407,0.3757062146892655,-6.28618557093712,0.00813006680248645,Original +302,0.4745762711864407,0.3204802259887006,-5.96246084588649,0.00943790429103949,Original +303,0.5,0.3644067796610169,-6.357755313912211,0.00787356840736677,Original +304,0.43333333333333335,0.3771186440677966,-1.5819079806991574,0.21181729689488749,Original +305,0.5166666666666667,0.4067796610169492,-4.404557662822478,0.021703011848884367,Original +306,0.4745762711864407,0.4101694915254237,-2.577629100382969,0.12328440517539614,Original +307,0.5084745762711864,0.3753531073446328,-6.31056255621062,0.008041485679774507,Original +308,0.4576271186440678,0.3672316384180791,-7.5424723326565015,0.00483178537362738,Original +309,0.423728813559322,0.3145951035781544,-3.1849674947750137,0.08604712307024512,Original +310,0.3898305084745763,0.32551789077212806,-1.4349594508232855,0.2877633918978512,Original +311,0.4745762711864407,0.354590395480226,-3.4224483105094405,0.041768035740251584,Original +312,0.5084745762711864,0.3714689265536723,-5.34778311027372,0.012788321514374179,Original +313,0.48333333333333334,0.3771186440677966,-2.881670261299946,0.06344247147107336,Original +314,0.423728813559322,0.33749999999999997,-14.93808506039733,0.0006510638055588633,Original +315,0.4915254237288136,0.3628531073446327,-7.3695757821568195,0.005165112471402641,Original +316,0.3898305084745763,0.32937853107344633,-2.297491967496297,0.10523014082412589,Original +317,0.4745762711864407,0.42627118644067796,-4.07489509276445,0.02667808711215743,Original +318,0.4745762711864407,0.3206920903954802,-6.4099048762033455,0.00769333223799094,Original +319,0.288135593220339,0.2196327683615819,-3.2102314527592526,0.04894759775912512,Original +320,0.5254237288135594,0.3799435028248588,-4.547561092589984,0.019918871326456516,Original +321,0.3898305084745763,0.3415960451977401,-2.7606608826884482,0.07011354688774671,Original +322,0.3898305084745763,0.33778248587570625,-2.679574844992895,0.07507605335291008,Original +323,0.4576271186440678,0.36687853107344637,-2.7295669647405267,0.07196693599133859,Original +324,0.5084745762711864,0.3924435028248588,-4.124951949583921,0.02583406935919925,Original +325,0.4406779661016949,0.3293079096045197,-5.9823754058047705,0.009349794795095102,Original +326,0.4406779661016949,0.3120056497175141,-4.032355280571854,0.027423480466249588,Original +327,0.559322033898305,0.3836864406779661,-4.836938005621383,0.01685256521081953,Original +328,0.4406779661016949,0.3209745762711864,-4.989071878278333,0.015485018365118487,Original +329,0.43333333333333335,0.3389830508474576,-6.817746450746516,0.00645497746167015,Original +330,0.4067796610169492,0.3292372881355932,-5.0198252557428855,0.015226316305783671,Original +331,0.3898305084745763,0.3586158192090395,-4.20590123701231,0.0245406514009237,Original 
+332,0.4745762711864407,0.36730225988700566,-5.386626427441342,0.012534293739746833,Original +333,0.5,0.385593220338983,-9.000000000000007,0.002895812161864139,Original +334,0.5084745762711864,0.3543785310734463,-13.59402614992177,0.0008610534762829226,Original +335,0.3898305084745763,0.2635593220338983,-2.8494467148061093,0.10425518121888491,Original +336,0.423728813559322,0.3543785310734463,-12.120686030907725,0.0012087821339881704,Original +337,0.45,0.34745762711864403,-9.372619697821948,0.00257258879538172,Original +338,0.576271186440678,0.3879943502824859,-9.187860980455177,0.0027265288821396168,Original +339,0.4745762711864407,0.3543079096045198,-3.2679435909119663,0.046849662447178565,Original +340,0.5254237288135594,0.4048728813559322,-6.267903613009531,0.008197342653735116,Original +341,0.3898305084745763,0.3246468926553672,-2.4329184228681733,0.0930869236120761,Original +342,0.45,0.364406779661017,-7.823426359338976,0.004348181653136324,Original +343,0.4576271186440678,0.36313559322033895,-3.4592703994453387,0.04066170197907394,Original +344,0.3898305084745763,0.27429378531073445,-13.77956011482232,0.0008271633740114473,Original +345,0.4406779661016949,0.34562146892655365,-2.1945120664549007,0.11576447779308814,Original +346,0.4576271186440678,0.37535310734463284,-2.7799229360270217,0.06899512300862856,Original +347,0.3728813559322034,0.28691148775894537,-2.3579139690812547,0.14242127080654132,Original +348,0.4406779661016949,0.328954802259887,-8.470193664445993,0.003454751941090446,Original +349,0.4666666666666667,0.4110169491525424,-3.8450462874172215,0.031043679643556813,Original +350,0.4745762711864407,0.3502824858757062,-8.315218406202995,0.003644917117727713,Original +351,0.4576271186440678,0.3632062146892655,-2.5878167974480393,0.08122750520425058,Original +352,0.5166666666666667,0.4110169491525424,-9.907502030846539,0.0021871339690660045,Original +353,0.4576271186440678,0.417725988700565,-3.7729601826121053,0.03259975536350789,Original +354,0.4915254237288136,0.3757062146892655,-6.25244138363679,0.00825481406798463,Original +355,0.423728813559322,0.3332627118644068,-9.283339926398568,0.0026454980508478078,Original +356,0.4915254237288136,0.36320621468926556,-4.276430704251948,0.023481122723639215,Original +357,0.5084745762711864,0.40543785310734465,-3.1308652437968307,0.05203060761024695,Original +358,0.4576271186440678,0.3670197740112995,-3.3865448130535376,0.04288388807797634,Original +359,0.4745762711864407,0.3963983050847458,-2.355804437045393,0.09977877653717733,Original +360,0.4067796610169492,0.2953389830508475,-13.691233125567791,0.000843071969469656,Original +361,0.4666666666666667,0.3983050847457627,-3.3886778033477842,0.042816545584287505,Original +362,0.5166666666666667,0.3855932203389831,-3.8368077899595163,0.03121662594701848,Original +363,0.5254237288135594,0.30430790960451976,-4.336743966663418,0.022621526682653798,Original +364,0.4745762711864407,0.35007062146892653,-7.021230179838503,0.005934606781342949,Original +365,0.4915254237288136,0.37521186440677967,-3.216398138197251,0.04871783239364945,Original +366,0.423728813559322,0.3418079096045198,-3.841143835488625,0.031125448001670088,Original +367,0.423728813559322,0.34187853107344635,-3.1832852004313463,0.04996780945878653,Original +368,0.5,0.3686440677966102,-5.6286160538198615,0.011091567639778482,Original +369,0.4406779661016949,0.3332627118644068,-6.9539103841401015,0.0061004380937106,Original +370,0.4745762711864407,0.37584745762711863,-2.9318808768994544,0.06090803764359439,Original 
+371,0.559322033898305,0.4004237288135593,-3.7578369991437737,0.03293881990782305,Original +372,0.4576271186440678,0.37966101694915255,-5.662208585049302,0.010908789440745359,Original +373,0.4576271186440678,0.3548022598870057,-3.46808249954851,0.04040248651626954,Original +374,0.4406779661016949,0.3711158192090396,-2.4516068885038713,0.09154838744355702,Original +375,0.5084745762711864,0.36271186440677966,-9.86487655643415,0.0022149241738372996,Original +376,0.4576271186440678,0.3713983050847458,-8.050860563356986,0.0040025200155213245,Original +377,0.423728813559322,0.32507062146892657,-2.8491875970653315,0.06515249712984865,Original +378,0.4915254237288136,0.36306497175141245,-6.0781754491615825,0.008940693333973865,Original +379,0.5166666666666667,0.4406779661016949,-5.490939506738963,0.011883974620146622,Original +380,0.559322033898305,0.4046610169491526,-3.190034610277395,0.04970976762287175,Original +381,0.4406779661016949,0.33375706214689266,-2.933998334563753,0.06080396633288608,Original +382,0.4406779661016949,0.3626412429378531,-2.0904012105524146,0.1277280900520854,Original +383,0.4745762711864407,0.3504237288135593,-5.129835675678766,0.014345743161730394,Original +384,0.4067796610169492,0.3543079096045198,-4.181753231671517,0.02491756822152362,Original +385,0.423728813559322,0.346045197740113,-2.667891875399661,0.07582649117357966,Original +386,0.4915254237288136,0.3714689265536723,-5.879572942861632,0.009816556345478428,Original +387,0.5084745762711864,0.37966101694915255,-6.598309513974847,0.007085641181587867,Original +388,0.4745762711864407,0.3627824858757063,-4.484419263456089,0.020682260794596567,Original +389,0.559322033898305,0.3836864406779661,-4.199186687635008,0.024644713133568197,Original +390,0.4406779661016949,0.35444915254237286,-7.140350877192979,0.0056554388389449695,Original +391,0.4406779661016949,0.3755649717514124,-4.427578329769953,0.021402204751343277,Original +392,0.43333333333333335,0.37288135593220345,-2.6341579069577183,0.0780460308657842,Original +393,0.4067796610169492,0.33719397363465164,-2.622926709469525,0.1197907664196296,Original +394,0.5084745762711864,0.36299435028248583,-8.553684424935213,0.003357721498987871,Original +395,0.4576271186440678,0.33326271186440676,-18.123845744121766,0.00036642093322613114,Original +396,0.5333333333333333,0.36864406779661013,-3.142183764359604,0.051576243518155256,Original +397,0.5,0.3389830508474576,-7.756717518813398,0.004457004963920533,Original +398,0.4745762711864407,0.3290960451977401,-5.1956627047447785,0.013850433541822478,Original +399,0.423728813559322,0.3206920903954802,-4.95328158335475,0.015793349475275394,Original +400,0.4745762711864407,0.4135593220338983,-3.3916011823151724,0.04272446714466854,Original +401,0.4745762711864407,0.3248587570621469,-6.203180801394921,0.008441476456348773,Original +402,0.5254237288135594,0.40077683615819215,-5.011757995152538,0.015293631034164368,Original +403,0.4745762711864407,0.39237288135593223,-11.151144229232496,0.0015455390866235931,Original +404,0.4067796610169492,0.28714689265536725,-3.409092533983689,0.04217876250784662,Original +405,0.423728813559322,0.33173258003766476,-3.524320965413247,0.07193114099352556,Original +406,0.43333333333333335,0.3305084745762712,-2.3067562719090997,0.10434037801243047,Original +407,0.4666666666666667,0.36016949152542377,-2.4527621833439794,0.09145430123164801,Original +408,0.4067796610169492,0.3483050847457627,-11.775818566563489,0.00713429023847155,Original 
+409,0.423728813559322,0.38806497175141247,-2.8670514268084752,0.06420504585478538,Original +410,0.4406779661016949,0.4092514124293785,-2.568138862870189,0.08262673431862806,Original +411,0.4745762711864407,0.3839689265536724,-2.903867111427604,0.06230610742939453,Original +412,0.4666666666666667,0.3940677966101695,-6.80809631531433,0.006481122703408014,Original +413,0.4406779661016949,0.3542372881355932,-2.2861298780156964,0.10633394781548948,Original +414,0.4745762711864407,0.38425141242937855,-2.687063737163629,0.07459984908288483,Original +415,0.4406779661016949,0.3536723163841808,-3.172729727206767,0.08662826593910922,Original +416,0.5166666666666667,0.35593220338983045,-4.843649660017283,0.016788991301625738,Original +417,0.4406779661016949,0.3670197740112994,-2.9579994448141673,0.05963975694231845,Original +418,0.423728813559322,0.30903954802259886,-2.5908795387845776,0.12224781736312054,Original +419,0.576271186440678,0.4008474576271186,-8.297720726530276,0.0036672497469535698,Original +420,0.4406779661016949,0.38182674199623357,-1.9595943025088522,0.1891147904640091,Original +421,0.3898305084745763,0.32076271186440675,-4.322021374340152,0.022827550876124904,Original +422,0.4745762711864407,0.4388418079096046,-2.947245022552593,0.0601579316359931,Original +423,0.4915254237288136,0.39632768361581916,-4.060240963855425,0.02693187975555432,Original +424,0.3898305084745763,0.3418079096045198,-3.27165152540788,0.04671879927774851,Original +425,0.3898305084745763,0.3483992467043315,-5.499999999999996,0.031504003041813854,Original +426,0.5833333333333334,0.4279661016949152,-10.734900802433872,0.0017285150870629262,Original +427,0.5084745762711864,0.34230225988700563,-3.432024351211093,0.041476672849086224,Original +428,0.5254237288135594,0.35021186440677965,-10.964467451419276,0.0016242528673193293,Original +429,0.5254237288135594,0.37577683615819213,-5.036096342175906,0.015091717648582546,Original +430,0.4915254237288136,0.4134887005649718,-4.473097699753992,0.020823127617620334,Original +431,0.3559322033898305,0.3081214689265537,-3.5637008822210965,0.037720655653052076,Original +432,0.5084745762711864,0.37563559322033896,-4.420794483496409,0.02149028401764827,Original +433,0.5166666666666667,0.34745762711864403,-5.845641436961716,0.009977307916285254,Original +434,0.4576271186440678,0.3375706214689265,-16.999999999999993,0.00044334353831207803,Original +435,0.4915254237288136,0.4263418079096045,-4.09478757977338,0.026338481224541917,Original +436,0.4406779661016949,0.3378531073446328,-3.7042710296105374,0.034176730688008236,Original +437,0.4745762711864407,0.39661016949152544,-3.8969992136018448,0.029980924038161237,Original +438,0.45,0.3305084745762712,-3.6406043454349715,0.03572662155623015,Original +439,0.5084745762711864,0.42203389830508475,-6.715954603984051,0.006737927535009723,Original +440,0.4067796610169492,0.32916666666666666,-13.44549998474748,0.0008895256433019566,Original +441,0.3898305084745763,0.3208333333333333,-3.6177990004181297,0.0363035983070319,Original +442,0.43333333333333335,0.37288135593220345,-2.5220141862320173,0.08602451986599671,Original +443,0.4666666666666667,0.3940677966101695,-5.016126374955461,0.01525713242925573,Original +444,0.4915254237288136,0.39223163841807906,-3.5593682026592988,0.03783717712998915,Original +445,0.559322033898305,0.41744350282485876,-3.7183243247458044,0.03384627426695061,Original +446,0.5,0.35593220338983056,-4.164132562731401,0.025197328469594847,Original 
+447,0.559322033898305,0.37584745762711863,-5.567318968997769,0.01143555025754969,Original +448,0.5084745762711864,0.43036723163841806,-3.6908520053100666,0.03449613636026554,Original +449,0.5084745762711864,0.4135593220338983,-6.287241983947008,0.008126201507413536,Original +450,0.423728813559322,0.354590395480226,-4.195847013586463,0.024696683385781026,Original +451,0.559322033898305,0.3545197740112994,-7.100680549678681,0.0057464492134181155,Original +452,0.4406779661016949,0.3925141242937853,-2.652993667727985,0.07679697042443225,Original +453,0.559322033898305,0.32492937853107345,-10.457966297708378,0.0018664458731184775,Original +454,0.423728813559322,0.35868644067796607,-13.182726027622682,0.000943041742019242,Original +455,0.43333333333333335,0.3855932203389831,-4.476919366879844,0.0207754384968935,Original +456,0.3559322033898305,0.3038135593220339,-2.8400584700013614,0.06564341941663877,Original +457,0.4067796610169492,0.3038841807909604,-4.6304395053630865,0.01897139383687084,Original +458,0.423728813559322,0.29950564971751414,-3.9160409861832215,0.02960312502298523,Original +459,0.4915254237288136,0.4095338983050847,-3.343711500585689,0.04426514489947514,Original +460,0.4,0.3516949152542373,-2.764906125414197,0.06986512747198882,Original +461,0.4745762711864407,0.3331920903954802,-3.755601850914824,0.03298931499378954,Original +462,0.4576271186440678,0.3418079096045198,-5.430582663966678,0.012254723173754265,Original +463,0.4067796610169492,0.342090395480226,-2.3719428909642666,0.09833105019693004,Original +464,0.5254237288135594,0.3714689265536723,-9.312498469112693,0.002621388573795643,Original +465,0.4067796610169492,0.3089453860640301,-4.904381946588596,0.03914970069711023,Original +466,0.5084745762711864,0.40120056497175144,-3.3115695002057812,0.04533888578886271,Original +467,0.423728813559322,0.34611581920903955,-5.009915149758147,0.015309062462291299,Original +468,0.4745762711864407,0.3670197740112994,-9.048660230550595,0.002850658340600586,Original +469,0.4406779661016949,0.34138418079096045,-2.360589363770645,0.09934685042336319,Original +470,0.4745762711864407,0.3801553672316384,-2.8398703872208553,0.06565358206650312,Original +471,0.4576271186440678,0.3372175141242938,-3.5534330922688557,0.037997540271468185,Original +472,0.5,0.44491525423728817,-2.26300952742407,0.10862358012074654,Original +473,0.4576271186440678,0.41779661016949154,-4.370123877984895,0.02216324865119666,Original +474,0.423728813559322,0.32033898305084746,-9.161458990088187,0.011705573839571451,Original +475,0.5254237288135594,0.384180790960452,-3.706246583305507,0.03413002778125795,Original +476,0.3898305084745763,0.3247881355932203,-4.8167110843371415,0.017046061853773893,Original +477,0.4745762711864407,0.35444915254237286,-3.7842912965837407,0.032348636596183485,Original +478,0.423728813559322,0.32492937853107345,-3.4140132990801026,0.04202684058034692,Original +479,0.5423728813559322,0.3884180790960452,-7.8459916831607694,0.004312165413432141,Original +480,0.3898305084745763,0.2825564971751412,-8.059631433861888,0.003989928963853392,Original +481,0.43333333333333335,0.3432203389830508,-4.253333333333336,0.023821477797343008,Original +482,0.4,0.3728813559322034,-3.9191835884530852,0.029541363360540956,Original +483,0.3898305084745763,0.3247175141242938,-3.2435043974379334,0.04772389993910521,Original +484,0.5333333333333333,0.385593220338983,-3.2052128901777346,0.04913559685870657,Original +485,0.5084745762711864,0.3458333333333333,-6.3744164409558755,0.007815387560659307,Original 
+486,0.559322033898305,0.3120056497175141,-10.204926179291313,0.0020055260919182947,Original +487,0.5423728813559322,0.3757062146892655,-2.741962532807021,0.07122087552292544,Original +488,0.5423728813559322,0.4221045197740113,-5.546054501123849,0.011558137961560508,Original +489,0.4576271186440678,0.37951977401129944,-4.250201094643769,0.02386812308620629,Original +490,0.45,0.3728813559322034,-4.984275273297009,0.015525883080703483,Original +491,0.5084745762711864,0.3332627118644068,-7.640503978788062,0.004655333124225627,Original +492,0.4745762711864407,0.37570621468926557,-6.286185570937117,0.008130066802486461,Original +493,0.3898305084745763,0.3500706214689266,-2.435222247729202,0.09289556214456558,Original +494,0.4576271186440678,0.4049435028248588,-5.1288742164266266,0.0143531474427786,Original +495,0.4666666666666667,0.3389830508474576,-18.452822728966602,0.00034730393994395734,Original +496,0.4406779661016949,0.39505649717514124,-3.8915662650602405,0.16012516703063404,Original +497,0.4915254237288136,0.38799435028248586,-4.361164169302447,0.022285069422793412,Original +498,0.4745762711864407,0.37973163841807916,-2.7216754734382707,0.07244696363099003,Original +499,0.4067796610169492,0.3500706214689266,-2.6495878836827105,0.07702097888753302,Original +500,0.3220338983050847,0.2699152542372881,-4.064774047804071,0.02685304092817379,Original +501,0.4406779661016949,0.354590395480226,-3.6417926698166023,0.035696879201596796,Original +502,0.4745762711864407,0.3714689265536723,-6.236810901332353,0.008313449992551876,Original +503,0.423728813559322,0.3586158192090395,-4.619015880036581,0.019098463373393156,Original +504,0.5084745762711864,0.39661016949152544,-2.9340578815309537,0.06080104286808068,Original +505,0.4067796610169492,0.31228813559322033,-6.309916959963368,0.008043815258589415,Original +506,0.4406779661016949,0.3627824858757063,-2.7319309661026785,0.07182390621092707,Original +507,0.5254237288135594,0.3716101694915254,-2.9643107979224634,0.059338263715170134,Original +508,0.4666666666666667,0.3601694915254237,-3.409705862611856,0.04215978877786793,Original +509,0.4915254237288136,0.3630649717514124,-5.7764663818628845,0.010315868020629015,Original +510,0.4576271186440678,0.33742937853107347,-6.023354344153489,0.009171855397206797,Original +511,0.5084745762711864,0.3258003766478343,-48.499999999999964,0.00042485397562817395,Original +512,0.4576271186440678,0.2867231638418079,-4.840000000000001,0.016823522712679524,Original +513,0.4406779661016949,0.3209745762711864,-3.5295270186824976,0.038652315352543654,Original +514,0.4067796610169492,0.3587570621468927,-2.6549539521063013,0.07666840199371955,Original +515,0.5254237288135594,0.286864406779661,-10.52321129742476,0.0018326710996097446,Original +516,0.5254237288135594,0.3799435028248587,-6.86666666666667,0.006324551982225255,Original +517,0.4745762711864407,0.3755649717514124,-3.418861432750501,0.04187784075273277,Original +518,0.4067796610169492,0.29964689265536726,-5.609714558498766,0.011196176065376182,Original +519,0.3389830508474576,0.32024482109227875,-10.47368421052631,0.008993145222343412,Original +520,0.4915254237288136,0.3706214689265537,-6.849973076409931,0.02065393933365802,Original +521,0.4,0.3601694915254237,-3.1333333333333346,0.05193110106234139,Original +522,0.45,0.3940677966101695,-5.245148133977578,0.013492692472278441,Original +523,0.4745762711864407,0.37175141242937854,-2.224480146264155,0.11257283375371135,Original 
+524,0.4576271186440678,0.33340395480225987,-4.859171643105984,0.016643155194105735,Original +525,0.45,0.4194915254237288,-2.8609898912604983,0.06452459428804105,Original +526,0.423728813559322,0.33333333333333337,-6.275716324421885,0.008168503479790335,Original +527,0.4406779661016949,0.29138418079096046,-5.949008298622876,0.009498040523452093,Original +528,0.4406779661016949,0.3882768361581921,-4.096759822564065,0.026305115270877533,Original +529,0.5,0.3389830508474576,-2.4666312377251187,0.09033399701898738,Original +530,0.4745762711864407,0.39216101694915256,-2.6138797970211702,0.07941891525657455,Original +531,0.3898305084745763,0.2865348399246704,-10.417426280656978,0.009089216213235031,Original +532,0.559322033898305,0.40494350282485875,-3.5632864433698304,0.03773178159693135,Original +533,0.4067796610169492,0.3081214689265537,-2.706180513915112,0.07340111077355108,Original +534,0.4745762711864407,0.3375706214689265,-3.298098944452524,0.04579869976521974,Original +535,0.423728813559322,0.30374293785310735,-3.6378487523667777,0.0357957127768318,Original +536,0.5254237288135594,0.37980225988700567,-5.144656775656592,0.014232232343107012,Original +537,0.4067796610169492,0.3334039548022599,-5.550438587280521,0.011532723915216122,Original +538,0.4576271186440678,0.3456214689265537,-2.619445228666292,0.07903917160851617,Original +539,0.4406779661016949,0.34597457627118644,-22.472055097835053,0.0001929545430821344,Original +540,0.3389830508474576,0.28707627118644075,-3.9963952715287125,0.02807461925885932,Original +541,0.48333333333333334,0.3855932203389831,-3.388738579570791,0.042814628734577545,Original +542,0.4915254237288136,0.4495291902071563,-3.306335305824179,0.08057456813744839,Original +543,0.4406779661016949,0.33326271186440676,-4.308228721563899,0.023022768426326834,Original +544,0.3898305084745763,0.30409604519774014,-3.009193969690081,0.057248412075778514,Original +545,0.4576271186440678,0.29526836158192094,-7.3968729657562395,0.005110511890500865,Original +546,0.4745762711864407,0.3627824858757062,-4.484419263456091,0.020682260794596535,Original +547,0.4666666666666667,0.3432203389830508,-8.529366637570185,0.0033856068172450396,Original +548,0.5254237288135594,0.3163135593220339,-7.643205738383732,0.004650591457090468,Original +549,0.4067796610169492,0.2828389830508474,-3.1258403580998007,0.052233941204207116,Original +550,0.4666666666666667,0.39406779661016955,-2.982530505374287,0.058478563965002554,Original +551,0.3559322033898305,0.30903954802259886,-7.0399681163437675,0.019586257511572443,Original +552,0.4406779661016949,0.38813559322033897,-1.8603721116372136,0.15977096644438923,Original +553,0.5254237288135594,0.358545197740113,-4.751834676331621,0.017686518806967016,Original +554,0.4915254237288136,0.3206920903954802,-3.269035331124903,0.046811083782206005,Original +555,0.4915254237288136,0.3500706214689266,-3.7595621188342796,0.032899914767886236,Original +556,0.4576271186440678,0.3628531073446328,-5.912181117649445,0.009665258648047835,Original +557,0.5084745762711864,0.38834745762711864,-3.4214826476365072,0.04179756150788246,Original +558,0.5,0.35169491525423724,-4.748264132284415,0.017722668444947104,Original +559,0.3728813559322034,0.274364406779661,-4.35035650844239,0.022433171283776563,Original +560,0.2542372881355932,0.19851694915254237,-3.3448722836047113,0.04422697438835633,Original +561,0.4915254237288136,0.41370056497175145,-3.4764271828366216,0.04015895679106413,Original 
+562,0.423728813559322,0.341454802259887,-2.2689902462968896,0.10802564058574593,Original +563,0.4745762711864407,0.37153954802259886,-3.9815118119797472,0.028349938061132722,Original +564,0.4067796610169492,0.3755649717514124,-6.27344916163044,0.008176858545156238,Original +565,0.4406779661016949,0.3840395480225989,-9.780487804878028,0.002271338003074173,Original +566,0.4406779661016949,0.35451977401129947,-4.066666666666665,0.02682021272712732,Original +567,0.423728813559322,0.4050141242937853,-3.35873852777973,0.0437742186193112,Original +568,0.4915254237288136,0.3879943502824859,-4.361164169302444,0.02228506942279346,Original +569,0.4406779661016949,0.3421610169491526,-3.2068965517241366,0.049072424069962796,Original +570,0.4915254237288136,0.405225988700565,-3.4882407793268584,0.03981737779020827,Original +571,0.576271186440678,0.4471751412429379,-6.645559382413252,0.006943215258999799,Original +572,0.576271186440678,0.4717514124293785,-6.08276253029822,0.02597846598858569,Original +573,0.4576271186440678,0.2995762711864407,-10.972170265475434,0.0016209009976600197,Original +574,0.3728813559322034,0.33145009416195853,-2.8168113585237156,0.10631108992737348,Original +575,0.4067796610169492,0.3713983050847458,-3.303424358920434,0.045616214777893696,Original +576,0.5084745762711864,0.3542372881355932,-9.208061462894172,0.0027091139233435414,Original +577,0.5084745762711864,0.35007062146892653,-8.932852690514894,0.0029596885792663864,Original +578,0.4745762711864407,0.3626412429378531,-3.365301828336874,0.04356196753364835,Original +579,0.3728813559322034,0.2785310734463277,-5.283646176355578,0.013222684385745894,Original +580,0.4166666666666667,0.3516949152542373,-4.489140335563251,0.02062388445422241,Original +581,0.5254237288135594,0.40098870056497177,-7.988978716899146,0.004092863743178219,Original +582,0.3728813559322034,0.28255649717514125,-6.786220279071329,0.0065409101032541635,Original +583,0.4406779661016949,0.33771186440677964,-5.3858337188310035,0.012539411668401262,Original +584,0.4067796610169492,0.38757062146892657,-2.4285714285714284,0.13584143478190688,Original +585,0.3898305084745763,0.3417372881355932,-6.45766024002191,0.007533010897429456,Original +586,0.4576271186440678,0.3964689265536723,-4.172537109479583,0.02506338811524026,Original +587,0.4067796610169492,0.3169491525423729,-1.9629629629629635,0.14443412236891323,Original +588,0.4666666666666667,0.364406779661017,-6.301611145596256,0.008073865317362929,Original +589,0.423728813559322,0.3463983050847458,-2.043874646199097,0.13354795806792266,Original +590,0.4406779661016949,0.3966101694915254,-3.050041359322525,0.05542620511793901,Original +591,0.4576271186440678,0.3632062146892655,-2.9222455629486523,0.06138443122939498,Original +592,0.4406779661016949,0.3459745762711864,-3.610360849034176,0.03649435588128962,Original +593,0.4067796610169492,0.33785310734463275,-2.653428192571171,0.07676844842248204,Original +594,0.4067796610169492,0.346045197740113,-3.3888747468281344,0.04281033448755565,Original +595,0.38333333333333336,0.3305084745762712,-2.6185237571323796,0.07910189090563483,Original +596,0.4406779661016949,0.3377118644067797,-6.269358747755155,0.00819196116993917,Original +597,0.4406779661016949,0.38389830508474576,-8.48622665150705,0.0034358339331581286,Original +598,0.559322033898305,0.4096045197740113,-4.528095585898831,0.020150257453059105,Original +599,0.423728813559322,0.3882062146892655,-4.997867054186407,0.015410451099079129,Original 
+600,0.2033898305084746,0.12245762711864408,-3.608916325424141,0.03653155025346661,Original +601,0.3050847457627119,0.24032485875706217,-3.0165616144994942,0.05691423813610457,Original +602,0.2711864406779661,0.20247175141242937,-11.096292845694657,0.00156813241982992,Original +603,0.22033898305084745,0.14759887005649716,-2.8959560191449962,0.06270817363169148,Original +604,0.1694915254237288,0.1265536723163842,-3.972583638518804,0.02851676236015958,Original +605,0.22033898305084745,0.13933615819209041,-4.2672442091017775,0.02361573435774843,Original +606,0.22033898305084745,0.15188323917137478,-3.3981981491560638,0.0767589601591142,Original +607,0.3333333333333333,0.2584745762711864,-5.172270386627225,0.014023853313803805,Original +608,0.23728813559322035,0.17711864406779662,-5.84274742881549,0.00999117759634526,Original +609,0.25,0.1694915254237288,-3.878358759406699,0.03035678137281933,Original +610,0.22033898305084745,0.13057909604519774,-4.374936505877199,0.02209816935791197,Original +611,0.2711864406779661,0.18128531073446327,-4.128224087608492,0.025780103759508587,Original +612,0.1694915254237288,0.10677966101694915,-2.53917501033781,0.12636332880012854,Original +613,0.2033898305084746,0.1518361581920904,-3.0820624157214693,0.054048493565592826,Original +614,0.15254237288135594,0.12648305084745762,-2.492276285625687,0.0883063687998794,Original +615,0.23728813559322035,0.21101694915254238,-4.841386618546792,0.016810392289180406,Original +616,0.2033898305084746,0.13465160075329566,-2.718662067942085,0.11285123312157036,Original +617,0.1864406779661017,0.1602401129943503,-2.531317735884927,0.08532554275225189,Original +618,0.288135593220339,0.19406779661016949,-8.748025509254019,0.0031454221399862072,Original +619,0.23728813559322035,0.15176553672316384,-5.734186911097393,0.01053023168597254,Original +620,0.3389830508474576,0.25296610169491524,-2.7041039897995947,0.07353015997518966,Original +621,0.2033898305084746,0.15190677966101696,-7.409133468514449,0.005086235812941152,Original +622,0.23333333333333334,0.15254237288135594,-3.5204166420089162,0.038905627181690466,Original +623,0.23728813559322035,0.1899717514124294,-5.4166289591447665,0.012342580506632655,Original +624,0.22033898305084745,0.1435734463276836,-5.128214329323895,0.01435823217533278,Original +625,0.2542372881355932,0.16864406779661018,-6.5059819266483885,0.00737522940630693,Original +626,0.23333333333333334,0.20338983050847456,-2.4984439601924695,0.08782706589330419,Original +627,0.23728813559322035,0.18559322033898307,-8.275546059124256,0.0036958123216244147,Original +628,0.5423728813559322,0.48107344632768356,-2.69383410385129,0.07417255068242208,Original +629,0.2542372881355932,0.1643361581920904,-5.023778124999512,0.015193473744699431,Original +630,0.15254237288135594,0.11233521657250471,-2.7098687752823145,0.11346466393886437,Original +631,0.3050847457627119,0.1689265536723164,-5.86064261618386,0.00990581703436056,Original +632,0.23728813559322035,0.15600282485875705,-3.1321496288779005,0.051978794780651254,Original +633,0.2542372881355932,0.17281073446327683,-2.714263880607111,0.07290142241990857,Original +634,0.22033898305084745,0.16878531073446326,-4.294117647058823,0.023224732577996746,Original +635,0.2,0.17372881355932202,-6.200000000000003,0.008453719117567243,Original +636,0.5423728813559322,0.3711158192090396,-5.424838731783427,0.01229078962720508,Original +637,0.288135593220339,0.18566384180790962,-4.932949247691678,0.01597207780336412,Original 
+638,0.26666666666666666,0.19491525423728812,-6.558251799577894,0.0072094124531609394,Original +639,0.1694915254237288,0.12662429378531073,-4.954648074701186,0.01578143114842512,Original +640,0.3559322033898305,0.18135593220338986,-7.923076923076922,0.004192062411449591,Original +641,0.23333333333333334,0.1398305084745763,-4.195254538472044,0.02470591794360766,Original +642,0.22033898305084745,0.1730225988700565,-4.389315939938801,0.021905189439865372,Original +643,0.2542372881355932,0.17718926553672315,-9.34117796358571,0.0025979582105049733,Original +644,0.22033898305084745,0.13483992467043315,-8.711460465745363,0.012922185254776281,Original +645,0.22033898305084745,0.1812853107344633,-2.856098645795084,0.06478389664989048,Original +646,0.1694915254237288,0.1266949152542373,-3.796706309892804,0.032076336335065445,Original +647,0.18333333333333332,0.13559322033898308,-3.983368200684214,0.02831540894214516,Original +648,0.23728813559322035,0.16913841807909605,-2.2349882150268345,0.11147884166525937,Original +649,0.2711864406779661,0.19858757062146892,-2.5535821728149255,0.08368093910992941,Original +650,0.423728813559322,0.39653954802259883,-2.821061725972151,0.06667985231722355,Original +651,0.2542372881355932,0.22365819209039547,-6.611650272701572,0.007045041561568846,Original +652,0.15254237288135594,0.1290960451977401,-1.6375555461118865,0.24316808629278763,Original +653,0.22033898305084745,0.19830508474576272,-5.461092327709232,0.01206545301607257,Original +654,0.3220338983050847,0.16927966101694913,-3.519195236968839,0.038939748753238246,Original +655,0.23728813559322035,0.1772598870056497,-6.871842709362766,0.00631095591061526,Original +656,0.288135593220339,0.21094632768361582,-3.6366923315065693,0.035824758686131074,Original +657,0.3559322033898305,0.2918079096045198,-2.173368590328213,0.16182504545842755,Original +658,0.2833333333333333,0.1610169491525424,-3.7266706420173588,0.033651949508593376,Original +659,0.3050847457627119,0.18524011299435028,-3.1980807339949875,0.04940434566953938,Original +660,0.2711864406779661,0.1602401129943503,-4.406927169154064,0.02167179821796596,Original +661,0.2542372881355932,0.16468926553672317,-8.002907374821234,0.004072295634170163,Original +662,0.2033898305084746,0.13919491525423727,-8.229362053239296,0.003756251434707851,Original +663,0.5333333333333333,0.44067796610169496,-2.3309944988213394,0.10205524591620584,Original +664,0.23728813559322035,0.16899717514124293,-3.0669989094316277,0.054691170931315904,Original +665,0.21666666666666667,0.15677966101694918,-2.8266666666666658,0.06637194694612157,Original +666,0.22033898305084745,0.17740112994350282,-2.5678020379180455,0.0826509420493295,Original +667,0.2711864406779661,0.19399717514124293,-7.533632446839843,0.00484812769882256,Original +668,0.21666666666666667,0.1228813559322034,-4.990929625938783,0.015469228914864434,Original +669,0.423728813559322,0.4007768361581921,-2.3943057732222077,0.09636694644723628,Original +670,0.3389830508474576,0.3036723163841808,-2.439750182371333,0.09252085884337388,Original +671,0.1864406779661017,0.14058380414312618,-2.9676921508875798,0.0972607614111294,Original +672,0.2542372881355932,0.18997175141242936,-2.727682454452322,0.07208120831037232,Original +673,0.22033898305084745,0.17323446327683617,-2.443879350950321,0.09218076354483482,Original +674,0.2542372881355932,0.1606638418079096,-2.8273201672963535,0.06633616259613716,Original +675,0.2542372881355932,0.16885593220338985,-6.94311791030617,0.00612758913066015,Original 
+676,0.2833333333333333,0.1694915254237288,-8.226203052846838,0.003760433078739023,Original +677,0.2542372881355932,0.21511299435028247,-3.898316388834917,0.029954591830056016,Original +678,0.26666666666666666,0.15677966101694915,-4.3423725516066725,0.022543397382713576,Original +679,0.2,0.14830508474576273,-12.19999999999999,0.001185729108116372,Original +680,0.23728813559322035,0.12245762711864408,-8.865808969091209,0.0030253402213844243,Original +681,0.21666666666666667,0.15254237288135594,-4.144434018455757,0.025514895723579813,Original +682,0.2711864406779661,0.1899011299435028,-7.530600502193264,0.004853749660576098,Original +683,0.13333333333333333,0.1016949152542373,-3.2331615074619027,0.048100099388329834,Original +684,0.2711864406779661,0.1899717514124294,-3.1834100610600315,0.04996302047691425,Original +685,0.23728813559322035,0.18566384180790962,-4.298438567165345,0.023162647133957697,Original +686,0.18333333333333332,0.15677966101694918,-6.266666666666664,0.008201920869688295,Original +687,0.2711864406779661,0.2027542372881356,-3.621297736079414,0.0362143108175595,Original +688,0.1864406779661017,0.16454802259887005,-5.39640733462664,0.012471369780280921,Original +689,0.1864406779661017,0.11807909604519774,-10.57181908559856,0.0018080338565436176,Original +690,0.5254237288135594,0.3463276836158192,-6.147522027439089,0.008659111206240855,Original +691,0.3050847457627119,0.2615819209039548,-4.063904567563298,0.026868139751298294,Original +692,0.22033898305084745,0.1644774011299435,-5.495043416350755,0.011859301638986507,Original +693,0.5423728813559322,0.45593220338983054,-4.34666107141259,0.02248410342384351,Original +694,0.23728813559322035,0.2068502824858757,-3.4595325465113227,0.04065396019819478,Original +695,0.3389830508474576,0.17711864406779662,-8.161598246688756,0.0038473182454779415,Original +696,0.2711864406779661,0.1730225988700565,-6.742490376010055,0.006662617945926082,Original +697,0.288135593220339,0.15176553672316384,-5.775098949219386,0.010322711204428422,Original +698,0.2033898305084746,0.1434322033898305,-13.161604379732514,0.0009475271511557978,Original +699,0.23728813559322035,0.18121468926553674,-2.37554254984018,0.0980116289989706,Original +700,0.21666666666666667,0.1440677966101695,-4.11529355279074,0.025994211990808435,Original +701,0.2711864406779661,0.19837570621468928,-3.4094802718971997,0.042166766315337265,Original +702,0.288135593220339,0.1731638418079096,-7.679245283018867,0.004587946327225194,Original +703,0.21666666666666667,0.1440677966101695,-4.945967306057795,0.015857343159367175,Original +704,0.4067796610169492,0.3418079096045198,-5.938574464184704,0.00954502881900154,Original +705,0.1864406779661017,0.13509887005649718,-4.199506762976739,0.02463973970623848,Original +706,0.1864406779661017,0.1346045197740113,-2.8897637795275584,0.21209033041734834,Original +707,0.2033898305084746,0.14343220338983054,-4.115302637249344,0.025994060763921405,Original +708,0.5932203389830508,0.4980225988700565,-2.5424731748068985,0.08449660749382112,Original +709,0.2711864406779661,0.17747175141242938,-4.301532117285934,0.023118328990842697,Original +710,0.3333333333333333,0.2076271186440678,-3.6796971261465163,0.03476456233229096,Original +711,0.23728813559322035,0.17309322033898306,-2.2132516655917454,0.11375613879223859,Original +712,0.2542372881355932,0.17323446327683617,-3.2772900040790653,0.046520687780565555,Original +713,0.2542372881355932,0.18149717514124292,-14.714285714285706,0.0006808928260169268,Original 
+714,0.5254237288135594,0.417725988700565,-10.18365359023622,0.0020178363490044415,Original +715,0.5254237288135594,0.4609227871939736,-2.429062741718821,0.13579717414652953,Original +716,0.2711864406779661,0.2024717514124294,-4.532253548113998,0.020100540272823354,Original +717,0.23728813559322035,0.14774011299435028,-3.52461629327718,0.03878859478065401,Original +718,0.23333333333333334,0.16101694915254236,-6.609891577527327,0.007050376214881156,Original +719,0.2711864406779661,0.21525423728813559,-6.513721780101715,0.007350361959320121,Original +720,0.5254237288135594,0.4175847457627119,-8.4047123277365,0.0035334622322826825,Original +721,0.3,0.1652542372881356,-6.045729350003519,0.009076571184230395,Original +722,0.23728813559322035,0.16440677966101697,-2.6381526558147996,0.07777904054371029,Original +723,0.3,0.19491525423728814,-3.201666232864797,0.04926900790572485,Original +724,0.1864406779661017,0.15176553672316384,-3.080747359192189,0.05410421984381787,Original +725,0.3220338983050847,0.19823446327683616,-7.231156579885582,0.005454152940167432,Original +726,0.26666666666666666,0.18220338983050846,-4.8345434590575715,0.016875322539984233,Original +727,0.2542372881355932,0.15621468926553672,-6.014126755966144,0.009211533394928783,Original +728,0.5254237288135594,0.4514124293785311,-2.773457470011942,0.06936804083315011,Original +729,0.2711864406779661,0.1942090395480226,-6.695816773490067,0.006795828785145915,Original +730,0.2542372881355932,0.19823446327683614,-4.580754969794434,0.019532183920823603,Original +731,0.2,0.11440677966101695,-4.899219625733926,0.016274424287796678,Original +732,0.3220338983050847,0.21504237288135594,-2.93071839321159,0.060965267594478456,Original +733,0.2542372881355932,0.1772598870056497,-4.332361052668546,0.022682607763125775,Original +734,0.2033898305084746,0.1348870056497175,-2.4242425425396985,0.09381189733415801,Original +735,0.23728813559322035,0.17747175141242938,-2.3175577935580045,0.10331445267128257,Original +736,0.5932203389830508,0.5064971751412429,-3.4740867918071037,0.04022706931462362,Original +737,0.35,0.2542372881355932,-5.230883563714916,0.013594564408150057,Original +738,0.15254237288135594,0.11377118644067798,-2.1108984192580493,0.12526011393630182,Original +739,0.2033898305084746,0.16031073446327682,-2.958934625443265,0.05959496241150818,Original +740,0.2542372881355932,0.1480225988700565,-3.1535457618776688,0.05112515625755018,Original +741,0.288135593220339,0.1812853107344633,-7.814244577012594,0.004362950284160135,Original +742,0.288135593220339,0.12655367231638417,-9.201899164855353,0.0027144108276950137,Original +743,0.2542372881355932,0.19830508474576272,-5.284229075567873,0.01321865065791954,Original +744,0.3559322033898305,0.1475988700564972,-7.72844712666813,0.00450420801537689,Original +745,0.18333333333333332,0.1440677966101695,-3.5889645674855384,0.037050266207125974,Original +746,0.22033898305084745,0.13919491525423727,-4.348997984611037,0.022451877691097657,Original +747,0.23333333333333334,0.2076271186440678,-6.0666666666666735,0.008988582402196459,Original +748,0.23728813559322035,0.16857344632768362,-3.433022580597172,0.04144644945716894,Original +749,0.21666666666666667,0.1694915254237288,-2.5768659443669355,0.08200254809923566,Original +750,0.22033898305084745,0.12217514124293785,-6.391226809216671,0.0077572568493554745,Original +751,0.2711864406779661,0.15204802259887007,-4.146966401618811,0.025473782147724618,Original +752,0.23728813559322035,0.1856638418079096,-7.414159169988326,0.005076328764679312,Original 
+753,0.3389830508474576,0.2827683615819209,-2.946612591077136,0.06018857902224991,Original +754,0.23728813559322035,0.16857344632768362,-3.9354028393368354,0.02922522554345655,Original +755,0.288135593220339,0.1855225988700565,-9.224885508045242,0.002694722054185784,Original +756,0.2542372881355932,0.18149717514124294,-5.610767520239283,0.011190314670533465,Original +757,0.3050847457627119,0.21532485875706214,-4.672257354206355,0.018515494454469265,Original +758,0.2711864406779661,0.19413841807909604,-14.324297025025746,0.000737365952799921,Original +759,0.15,0.1271186440677966,-4.67653718043597,0.018469641646674036,Original +760,0.2542372881355932,0.23022598870056496,-2.4285714285714284,0.13584143478190688,Original +761,0.2542372881355932,0.1571563088512241,-6.947685432148197,0.020094339591082675,Original +762,0.2033898305084746,0.12919020715630886,-5.005419915905,0.03767240357329794,Original +763,0.1694915254237288,0.13072033898305085,-2.4581295560193186,0.09101873468865801,Original +764,0.5084745762711864,0.4389830508474576,-4.148687032732543,0.025445896223527123,Original +765,0.2033898305084746,0.14745762711864407,-3.1135770454260725,0.052734401788326495,Original +766,0.1864406779661017,0.1432909604519774,-3.097057119520469,0.053418165120319126,Original +767,0.23728813559322035,0.15197740112994348,-5.438132165795431,0.012207529595140413,Original +768,0.22033898305084745,0.16871468926553673,-5.51623494926466,0.011732957333623041,Original +769,0.23333333333333334,0.15677966101694915,-2.5809523809523816,0.08171227084668078,Original +770,0.23728813559322035,0.1393361581920904,-11.45967949880383,0.001426179984263791,Original +771,0.21666666666666667,0.13983050847457626,-3.156608161329988,0.05100442714599591,Original +772,0.23728813559322035,0.20268361581920905,-2.734913303711143,0.07164396808065115,Original +773,0.22033898305084745,0.16318267419962337,-2.2651619180423666,0.15174721013539427,Original +774,0.2542372881355932,0.1814265536723164,-3.4427734423525815,0.04115269085326815,Original +775,0.2033898305084746,0.16031073446327682,-5.211581712072246,0.01373401507365159,Original +776,0.3559322033898305,0.2573446327683616,-5.309251443565037,0.01304699781574515,Original +777,0.3898305084745763,0.33709981167608283,-28.00000000000001,0.0012730749910096228,Original +778,0.3220338983050847,0.14759887005649716,-7.540447009277395,0.00483552316841547,Original +779,0.22033898305084745,0.1561440677966102,-14.299454098945166,0.0007411711769066464,Original +780,0.22033898305084745,0.13481638418079095,-4.017937620623259,0.027682186108019237,Original +781,0.23728813559322035,0.1426553672316384,-3.941176470588237,0.15819200517512308,Original +782,0.15254237288135594,0.10988700564971751,-2.589247891896882,0.08112688802619472,Original +783,0.21666666666666667,0.13559322033898305,-4.428506143912038,0.021390194677135307,Original +784,0.22033898305084745,0.16864406779661018,-3.4776799529026854,0.040122557696007824,Original +785,0.2711864406779661,0.17309322033898306,-7.501973136790743,0.004907258242497694,Original +786,0.3728813559322034,0.24449152542372882,-3.749896613676689,0.03311865459697996,Original +787,0.23333333333333334,0.1271186440677966,-7.236123373843309,0.005443417574107074,Original +788,0.1864406779661017,0.14613935969868172,-3.446237951244447,0.07486648577768765,Original +789,0.2542372881355932,0.16871468926553673,-4.7295667427478545,0.017913544696107814,Original +790,0.5254237288135594,0.4600988700564972,-2.332818394140734,0.10188575953545359,Original 
+791,0.22033898305084745,0.1814265536723164,-4.854861096609305,0.016683488889815655,Original +792,0.2542372881355932,0.1350282485875706,-5.941894164731409,0.027174489715421215,Original +793,0.2542372881355932,0.21518361581920906,-2.4427691887863823,0.09227205070358749,Original +794,0.38333333333333336,0.21610169491525422,-9.572072668100612,0.002419061759112068,Original +795,0.2711864406779661,0.19823446327683616,-5.967112085495144,0.009417228305284209,Original +796,0.15254237288135594,0.12238700564971752,-6.812191696797892,0.00647001003369456,Original +797,0.3898305084745763,0.22379943502824862,-6.706721318121227,0.006764394593913462,Original +798,0.288135593220339,0.17281073446327685,-5.072284754402177,0.014797870994727638,Original +799,0.1864406779661017,0.16299435028248588,-3.608695652173914,0.06894253641177729,Original +800,0.5254237288135594,0.38411016949152543,-7.205444253314168,0.005510176845082696,Original +801,0.4576271186440678,0.36563088512241054,-2.235697940503432,0.1548857125971983,Original +802,0.45,0.36864406779661013,-3.21491849579106,0.04877283804441102,Original +803,0.5254237288135594,0.4011299435028249,-4.050528318752622,0.027101807358158075,Original +804,0.4745762711864407,0.36737288135593216,-3.6999746626775307,0.03427858129419643,Original +805,0.4406779661016949,0.4135593220338983,-4.495611895592143,0.02054420972468061,Original +806,0.3728813559322034,0.3290960451977401,-3.025290226140455,0.05652149905978702,Original +807,0.5084745762711864,0.4639830508474576,-2.3727172680373334,0.09826222788434269,Original +808,0.4915254237288136,0.43898305084745765,-3.136812146700215,0.051791252064485205,Original +809,0.4166666666666667,0.35169491525423724,-2.252629691622204,0.10967084489225898,Original +810,0.4576271186440678,0.4091101694915254,-2.4465756114105797,0.09195951277963867,Original +811,0.4576271186440678,0.4260593220338983,-2.3835678188830465,0.0973040279791015,Original +812,0.3728813559322034,0.32902542372881355,-6.272727272727274,0.0081795212436495,Original +813,0.4406779661016949,0.3584039548022599,-2.8942424583125095,0.06279568913296162,Original +814,0.5423728813559322,0.3962570621468927,-4.31678224360102,0.022901451094550856,Original +815,0.5084745762711864,0.438771186440678,-3.9134326914916078,0.029654511991928277,Original +816,0.4406779661016949,0.3586158192090395,-11.057143071059498,0.0015845269383183932,Original +817,0.3728813559322034,0.3093220338983051,-2.0349703424076058,0.1788266478229615,Original +818,0.5254237288135594,0.4639124293785311,-3.1846701192429343,0.049914723854652635,Original +819,0.4576271186440678,0.40091807909604515,-2.9501131862499728,0.060019186645291704,Original +820,0.4745762711864407,0.41572504708097924,-2.4051807590126426,0.13797203364885854,Original +821,0.4067796610169492,0.3145009416195857,-9.800000000000015,0.010252475022698292,Original +822,0.5254237288135594,0.42664783427495295,-3.434253416143983,0.07533225481738264,Original +823,0.423728813559322,0.35847457627118645,-3.7468218576747336,0.03318862977572546,Original +824,0.48333333333333334,0.4152542372881356,-2.677777777777778,0.07519088494134536,Original +825,0.423728813559322,0.3415960451977401,-2.404160372045395,0.09551661130031602,Original +826,0.4666666666666667,0.3686440677966102,-3.7363704002836675,0.03342790419303777,Original +827,0.45,0.3855932203389831,-3.427505887692899,0.041613829518252106,Original +828,0.5254237288135594,0.45572033898305087,-2.7857691081092364,0.06866006993419116,Original 
+829,0.5333333333333333,0.4322033898305085,-5.013002700820275,0.015283219713031168,Original +830,0.5084745762711864,0.41786723163841805,-2.9961510064713686,0.057846064903379533,Original +831,0.576271186440678,0.384180790960452,-4.605541138054455,0.01924977204112997,Original +832,0.4745762711864407,0.4048022598870057,-3.1550201885686455,0.051066984905793825,Original +833,0.4576271186440678,0.3936911487758945,-1.6469089297144321,0.24133036369239244,Original +834,0.48333333333333334,0.38983050847457623,-6.0432055511403355,0.009087253623111777,Original +835,0.4067796610169492,0.3036723163841808,-4.41816098848226,0.021524602925384974,Original +836,0.5084745762711864,0.4557203389830508,-7.350069795231334,0.00520460105096178,Original +837,0.4406779661016949,0.3461864406779661,-5.940860662234962,0.009534706994175631,Original +838,0.4915254237288136,0.3879237288135593,-3.2460121747166957,0.04763324643436874,Original +839,0.5084745762711864,0.3924435028248588,-4.124951949583921,0.02583406935919925,Original +840,0.4166666666666667,0.38135593220338987,-1.7503501050350099,0.17836241760761135,Original +841,0.4915254237288136,0.396680790960452,-3.34946047620416,0.04407650716830172,Original +842,0.5,0.4406779661016949,-3.8340579025361636,0.03127462797536876,Original +843,0.4576271186440678,0.3418079096045198,-5.165514464459437,0.014074465381170985,Original +844,0.5084745762711864,0.3840395480225988,-6.480569535567828,0.007457659131028016,Original +845,0.5166666666666667,0.4491525423728813,-6.170953464957155,0.008566603032878853,Original +846,0.423728813559322,0.33728813559322035,-2.5791808608632842,0.08183795390499486,Original +847,0.4745762711864407,0.3992467043314501,-2.0879830038875222,0.17203960039889202,Original +848,0.559322033898305,0.4053672316384181,-5.498322668127967,0.01183963469227656,Original +849,0.4576271186440678,0.36292372881355933,-6.299107729795289,0.008082951481625023,Original +850,0.45,0.3771186440677966,-2.0909375180606187,0.12766278225478178,Original +851,0.4067796610169492,0.3423728813559322,-1.7039616353344613,0.2305022123029592,Original +852,0.4576271186440678,0.32902542372881355,-6.562385865517576,0.007196507966961928,Original +853,0.4406779661016949,0.3251412429378531,-5.210719601593815,0.013740287056139356,Original +854,0.4067796610169492,0.32627118644067793,-2.0939473213563398,0.17129801016782423,Original +855,0.4067796610169492,0.32937853107344633,-3.048964780184798,0.05547328824097628,Original +856,0.5084745762711864,0.39661016949152544,-7.7424126813571705,0.004480807680029333,Original +857,0.4576271186440678,0.37608286252354045,-1.5525897646537226,0.2607163805465733,Original +858,0.423728813559322,0.3336864406779661,-3.1952478845632513,0.04951160636548708,Original +859,0.4745762711864407,0.3458333333333333,-9.810579653543469,0.002251005507538249,Original +860,0.4745762711864407,0.36257062146892655,-4.233384268372494,0.0241205868939632,Original +861,0.4406779661016949,0.4009180790960452,-4.384410769545493,0.02197077351423448,Original +862,0.4,0.3220338983050847,-5.039047529047533,0.015067470993325742,Original +863,0.4576271186440678,0.37959039548022605,-3.066783460464022,0.05470043251723667,Original +864,0.5,0.42372881355932207,-2.84604989415154,0.06532071006198013,Original +865,0.4576271186440678,0.3458333333333333,-3.64966532468076,0.03550063326417129,Original +866,0.5084745762711864,0.358545197740113,-3.7292186611760196,0.033592909525737194,Original +867,0.4576271186440678,0.3461864406779661,-3.667546560934897,0.03505999613164613,Original 
+868,0.4406779661016949,0.34173728813559323,-3.266799462776711,0.046890135550055285,Original +869,0.5423728813559322,0.40508474576271186,-5.713299488454646,0.010638289400887548,Original +870,0.4067796610169492,0.2995056497175141,-4.670840095116222,0.018530711133626115,Original +871,0.4166666666666667,0.3644067796610169,-3.5603266600026933,0.03781136121703687,Original +872,0.3389830508474576,0.295409604519774,-3.8525647377658387,0.030886920710077346,Original +873,0.4915254237288136,0.3878531073446328,-2.7019645629286955,0.07366341268340729,Original +874,0.43333333333333335,0.3516949152542373,-2.830478177733986,0.06616357500554147,Original +875,0.36666666666666664,0.32627118644067793,-2.1497076400880912,0.12074123791130136,Original +876,0.4406779661016949,0.36285310734463283,-3.674885279264579,0.03488117727992117,Original +877,0.423728813559322,0.27415254237288134,-8.239062446111248,0.0037434491451463214,Original +878,0.4915254237288136,0.32916666666666666,-28.126664663270653,9.866049174120721e-05,Original +879,0.43333333333333335,0.3347457627118644,-2.188743887105837,0.11639121058222453,Original +880,0.5423728813559322,0.4218220338983051,-11.810674379463547,0.0013048196771810819,Original +881,0.3559322033898305,0.3093220338983051,-1.5714285714285712,0.25668888376056537,Original +882,0.5254237288135594,0.47662429378531074,-2.224165710326518,0.11260576823386355,Original +883,0.559322033898305,0.396680790960452,-4.238952655416746,0.024036610732601344,Original +884,0.3728813559322034,0.3206920903954802,-3.753591024171325,0.03303482712591477,Original +885,0.5254237288135594,0.40946327683615824,-3.475538999555497,0.04018478833317944,Original +886,0.4576271186440678,0.379590395480226,-3.6545153763584652,0.03538041968158101,Original +887,0.4915254237288136,0.40494350282485875,-2.9327458060449008,0.06086550016185654,Original +888,0.4915254237288136,0.43898305084745765,-2.551058736525074,0.08386536725088448,Original +889,0.4,0.35593220338983056,-2.84815729902686,0.06520767196759084,Original +890,0.4745762711864407,0.31645480225988704,-39.57411910381416,3.5500916860886294e-05,Original +891,0.5254237288135594,0.4260593220338983,-8.799079181288127,0.0030926117525078255,Original +892,0.4406779661016949,0.3880649717514124,-2.6501717516402645,0.07698251884827165,Original +893,0.4915254237288136,0.41793785310734466,-3.015997915032065,0.05693971913913344,Original +894,0.45,0.37711864406779666,-3.8784935044945925,0.030354042786245917,Original +895,0.4745762711864407,0.41619585687382293,-1.4090909090909103,0.29417736761437996,Original +896,0.5,0.39830508474576276,-9.295160030897799,0.0026356894602544566,Original +897,0.38333333333333336,0.3305084745762712,-3.5988166779486694,0.03679295873270534,Original +898,0.4166666666666667,0.3644067796610169,-1.628834176702405,0.20183517132355724,Original +899,0.5084745762711864,0.3845338983050847,-2.6313736841918445,0.07823278504898863,Original +900,0.559322033898305,0.4138418079096045,-3.773628023053654,0.032584885614146145,Original +901,0.4576271186440678,0.3712806026365348,-1.8625711948272987,0.20356174355735687,Original +902,0.4915254237288136,0.409180790960452,-4.9902021373122745,0.015475409518374506,Original +903,0.4576271186440678,0.3536723163841808,-3.085580966165473,0.09093394434924655,Original +904,0.4406779661016949,0.3163841807909604,-10.370899457402706,0.0019128148288002538,Original +905,0.4915254237288136,0.40070621468926554,-5.159875560151832,0.014116891909403921,Original 
+906,0.4067796610169492,0.33319209039548026,-4.493884775113415,0.02056543411778116,Original +907,0.5166666666666667,0.4152542372881356,-2.847055192728138,0.0652667568561372,Original +908,0.3898305084745763,0.33145009416195853,-11.716898663286049,0.007205446408459767,Original +909,0.3559322033898305,0.29936440677966103,-1.9119123291898108,0.1518400483442069,Original +910,0.3728813559322034,0.32005649717514123,-2.143768150852684,0.1652700646335296,Original +911,0.3220338983050847,0.2693973634651601,-2.180850112451567,0.1609698801291486,Original +912,0.4745762711864407,0.4132768361581921,-2.677380542667445,0.07521629738306508,Original +913,0.5333333333333333,0.3771186440677966,-7.008990210800726,0.005964312251936928,Original +914,0.576271186440678,0.400635593220339,-6.348003491637961,0.007907886129610865,Original +915,0.4915254237288136,0.3949152542372881,-18.999999999999993,0.03347541671314822,Original +916,0.4166666666666667,0.3474576271186441,-3.086709862908689,0.05385213329290739,Original +917,0.4915254237288136,0.3258003766478343,-15.805266578356209,0.003979221225814003,Original +918,0.3898305084745763,0.3459039548022599,-2.838910304269998,0.06570548867709089,Original +919,0.5254237288135594,0.4134887005649718,-3.585413143844187,0.037143581857500066,Original +920,0.5254237288135594,0.43848870056497175,-3.2975199061269667,0.04581859729168717,Original +921,0.4745762711864407,0.4009180790960452,-4.901036565932749,0.016257948615872354,Original +922,0.4576271186440678,0.3584039548022599,-3.1383427887418938,0.051729870414491945,Original +923,0.559322033898305,0.5021892655367232,-6.111799193237621,0.008802686470096136,Original +924,0.4745762711864407,0.3416666666666667,-6.884772011368271,0.00627716185131719,Original +925,0.423728813559322,0.3459039548022599,-2.5149332127933626,0.08656123080121365,Original +926,0.4745762711864407,0.39668079096045206,-3.806413998599051,0.03186546090943339,Original +927,0.4576271186440678,0.34187853107344635,-4.207255276582219,0.02451973533970688,Original +928,0.5084745762711864,0.3376412429378531,-4.4091197996331335,0.021642966336736617,Original +929,0.45,0.3728813559322034,-2.2290356659326926,0.11209698013636105,Original +930,0.4166666666666667,0.3474576271186441,-3.0867098629086884,0.05385213329290739,Original +931,0.45,0.3728813559322034,-3.7150594432211537,0.033922680348442695,Original +932,0.4576271186440678,0.3629943502824859,-3.5263157894736845,0.03874136219588886,Original +933,0.4745762711864407,0.3627824858757063,-5.7192755125875685,0.010607225660250866,Original +934,0.45,0.2923728813559322,-10.891044814105634,0.001656669068269899,Original +935,0.3898305084745763,0.34173728813559323,-17.461538461538474,0.0004093729804445589,Original +936,0.5166666666666667,0.4279661016949152,-4.429570467592002,0.021376428303786093,Original +937,0.3898305084745763,0.33319209039548026,-1.7611959878594554,0.17642304434957817,Original +938,0.4166666666666667,0.2966101694915254,-4.147575310031268,0.025463909186015793,Original +939,0.5084745762711864,0.4387005649717514,-6.879715746378599,0.006290349260396344,Original +940,0.4915254237288136,0.3545197740112994,-10.777777777777775,0.0017083811519259897,Original +941,0.4067796610169492,0.32895480225988705,-4.070774182545795,0.026749143973066495,Original +942,0.48333333333333334,0.40254237288135597,-2.414985892866683,0.09459301523161902,Original +943,0.4915254237288136,0.4221751412429378,-3.7480916030534375,0.03315971002355427,Original +944,0.4576271186440678,0.37139830508474575,-6.762389833500976,0.006606867955287357,Original 
+945,0.4576271186440678,0.33742937853107347,-4.058782494615296,0.02695730866033387,Original +946,0.4067796610169492,0.3629943502824859,-2.8181818181818175,0.06683875138704513,Original +947,0.5333333333333333,0.4152542372881356,-2.558118456493597,0.08335065260117518,Original +948,0.4067796610169492,0.2911016949152542,-2.67070108252106,0.07564520129729006,Original +949,0.5254237288135594,0.37139830508474575,-5.336986813075771,0.012860119949256482,Original +950,0.3220338983050847,0.2701271186440678,-4.975196209154729,0.015603617644457647,Original +951,0.5254237288135594,0.4682909604519774,-4.883695241304617,0.01641608650487274,Original +952,0.4576271186440678,0.34985875706214686,-2.370422697154792,0.09846632715247584,Original +953,0.5254237288135594,0.39272598870056497,-3.3006539570698283,0.04571103238074162,Original +954,0.55,0.4322033898305085,-4.815101245041479,0.017061585926028165,Original +955,0.559322033898305,0.4262005649717514,-4.051889951012785,0.027077901844717624,Original +956,0.4166666666666667,0.35169491525423724,-2.9151315524126016,0.06173916082876997,Original +957,0.4915254237288136,0.3755649717514124,-10.56303549746513,0.0018124532293534081,Original +958,0.423728813559322,0.2911016949152542,-10.742132190804174,0.0017250974273507926,Original +959,0.4666666666666667,0.3686440677966102,-2.812229917701684,0.06716864584643882,Original +960,0.4915254237288136,0.37570621468926557,-5.0089472186085136,0.015317175750995964,Original +961,0.4915254237288136,0.3753531073446328,-3.3382526588579973,0.044445211846365185,Original +962,0.4745762711864407,0.4302966101694915,-2.890628912501168,0.06298074227793468,Original +963,0.45,0.39830508474576276,-3.185621103989269,0.049878312712040275,Original +964,0.5084745762711864,0.35868644067796607,-5.327850954897623,0.012921287882286202,Original +965,0.4406779661016949,0.31207627118644066,-3.533152576889349,0.03855209262090739,Original +966,0.4406779661016949,0.3608757062146893,-1.4303797468354427,0.38842195960522763,Original +967,0.4067796610169492,0.29555084745762716,-7.084467611285733,0.005784199991243675,Original +968,0.4745762711864407,0.3987758945386064,-5.717277635038901,0.029257011705986744,Original +969,0.4067796610169492,0.32888418079096043,-2.287766368648155,0.10617410540553696,Original +970,0.4745762711864407,0.42627118644067796,-3.519233773599045,0.03893867159865643,Original +971,0.4915254237288136,0.3332627118644068,-7.238793509124532,0.005437657782298041,Original +972,0.5254237288135594,0.4092514124293785,-6.2886655641721925,0.008120996607005396,Original +973,0.48333333333333334,0.4025423728813559,-2.8010960513215215,0.06779122416384425,Original +974,0.5084745762711864,0.40473163841807913,-3.46978940590354,0.040352519825929224,Original +975,0.4576271186440678,0.3502824858757062,-4.196397990844169,0.024688099628686736,Original +976,0.4745762711864407,0.417725988700565,-7.10832364958241,0.005728765340993184,Original +977,0.5084745762711864,0.46836158192090394,-9.941988596598865,0.0021649872660866308,Original +978,0.4745762711864407,0.2995762711864407,-10.04928647585347,0.00209795503470734,Original +979,0.45,0.3220338983050848,-3.6987295116025987,0.03430817170101989,Original +980,0.3898305084745763,0.33312146892655364,-2.347903032601037,0.1004970270572768,Original +981,0.5254237288135594,0.3836864406779661,-3.485462485693465,0.03989737572645941,Original +982,0.4576271186440678,0.3925141242937853,-3.5865984774856337,0.03711240322472348,Original +983,0.36666666666666664,0.326271186440678,-3.788162541206022,0.03226341112647686,Original 
+984,0.5423728813559322,0.3923728813559322,-20.347964212104664,0.00025950431091567957,Original +985,0.4915254237288136,0.4134887005649717,-3.901282639398381,0.029895400966674732,Original +986,0.5166666666666667,0.3983050847457627,-5.867173552077365,0.009874902457062155,Original +987,0.38333333333333336,0.3220338983050848,-2.4570411640937726,0.09110685313385442,Original +988,0.45,0.40254237288135597,-3.2790242451070704,0.04645996816866966,Original +989,0.559322033898305,0.38418079096045193,-4.595745763298827,0.019360742557531358,Original +990,0.48333333333333334,0.3940677966101695,-4.750402896977879,0.017701003201901513,Original +991,0.5084745762711864,0.363135593220339,-4.5211227964641365,0.02023399027426734,Original +992,0.4915254237288136,0.4175141242937853,-2.8647998103065557,0.06432351407478958,Original +993,0.5084745762711864,0.3798022598870056,-14.122758056462086,0.0007690037706351588,Original +994,0.423728813559322,0.33764124293785314,-3.4045365361503497,0.042320045665658175,Original +995,0.4067796610169492,0.3712570621468927,-6.263567998756802,0.008213404490252622,Original +996,0.5254237288135594,0.4639830508474576,-3.2766095606229877,0.04654453906356892,Original +997,0.3728813559322034,0.3123587570621469,-5.08813713983205,0.01467150017877107,Original +998,0.48333333333333334,0.3728813559322034,-3.5693253330753327,0.03757007584643612,Original +999,0.45,0.364406779661017,-5.27455297545764,0.01328581900289993,Original +1000,0.4406779661016949,0.35451977401129947,-6.7777777777777715,0.006564177674076621,Original +1001,0.4,0.27118644067796605,-2.510197384488104,0.0869224831963583,Original +1002,0.4067796610169492,0.30338983050847457,-5.263055373168918,0.034257089055649254,Original +1003,0.3728813559322034,0.3077683615819209,-1.8092905922289133,0.16810978065452678,Original +1004,0.423728813559322,0.33742937853107347,-2.129023252677582,0.12312500629142373,Original +1005,0.3559322033898305,0.3163841807909604,-4.041451884327386,0.02726185440936123,Original +1006,0.5254237288135594,0.4301553672316384,-2.476997933163102,0.08950755799392057,Original +1007,0.43333333333333335,0.3305084745762712,-3.9024896268095732,0.029871358819325514,Original +1008,0.423728813559322,0.36687853107344637,-2.115310434853906,0.12473633813156884,Original +1009,0.5254237288135594,0.40487288135593225,-4.138026787575419,0.025619300883706696,Original +1010,0.423728813559322,0.3541431261770245,-2.568187024807373,0.1240305945387,Original +1011,0.3728813559322034,0.29103107344632767,-3.7674177162295606,0.03272349692653532,Original +1012,0.3220338983050847,0.26151129943502827,-3.8711758641214336,0.03050322842853845,Original +1013,0.3389830508474576,0.2489406779661017,-6.249324287797364,0.008266464021748235,Original +1014,0.423728813559322,0.3287429378531073,-1.9757525677542294,0.1426456503116112,Original +1015,0.4915254237288136,0.46002824858757063,-3.229172176597325,0.048246204514222006,Original +1016,0.4067796610169492,0.27019774011299436,-5.5858638786774515,0.011330025903708709,Original +1017,0.3220338983050847,0.26596045197740115,-3.6973851037027248,0.03434015761009596,Original +1018,0.3728813559322034,0.274364406779661,-4.1604734938620584,0.02525593036334952,Original +1019,0.36666666666666664,0.3347457627118644,-1.9898190482959088,0.14070862393531006,Original +1020,0.4067796610169492,0.35437853107344636,-2.9292618143712588,0.06103707112726652,Original +1021,0.3898305084745763,0.3251412429378531,-2.3178660050430633,0.10328535861126524,Original 
+1022,0.423728813559322,0.3207627118644068,-9.7408025920138,0.002298526341652219,Original +1023,0.3898305084745763,0.2786723163841808,-7.110897442499789,0.005722826489597031,Original +1024,0.4406779661016949,0.32514124293785307,-3.3894381008331114,0.04279257405370464,Original +1025,0.3898305084745763,0.31631355932203387,-4.476394699131555,0.020781977238118643,Original +1026,0.3050847457627119,0.27838983050847455,-1.909090909090913,0.15226214753320294,Original +1027,0.4576271186440678,0.32090395480225986,-4.966780972281921,0.015676121865843336,Original +1028,0.4406779661016949,0.36278248587570616,-8.000108500500483,0.004076417682476059,Original +1029,0.45,0.3771186440677966,-3.8784935044945983,0.030354042786245806,Original +1030,0.3728813559322034,0.3313559322033898,-4.454545454545456,0.04688018499081239,Original +1031,0.423728813559322,0.3502824858757062,-3.8334908600273225,0.03128660551954041,Original +1032,0.5423728813559322,0.3545197740112994,-6.51303802142941,0.0073525543697032375,Original +1033,0.3559322033898305,0.2870762711864407,-4.234130868902486,0.024109305527904665,Original +1034,0.3559322033898305,0.29950564971751414,-1.737938290656899,0.18061180344428132,Original +1035,0.38333333333333336,0.288135593220339,-2.647722058442962,0.07714404224936885,Original +1036,0.4915254237288136,0.35466101694915253,-7.02229226282608,0.005932038385975604,Original +1037,0.423728813559322,0.3632062146892655,-2.200120786201923,0.1151589518445011,Original +1038,0.4406779661016949,0.35444915254237286,-4.143365599990044,0.025532267312653276,Original +1039,0.423728813559322,0.3882062146892655,-2.937104220060188,0.060651717076612384,Original +1040,0.4406779661016949,0.3415960451977401,-3.9152539744439387,0.029618618166276877,Original +1041,0.4745762711864407,0.3203389830508475,-4.2850226450669,0.023356119365012706,Original +1042,0.4067796610169492,0.34173728813559323,-5.289174189736959,0.013184494475533716,Original +1043,0.3898305084745763,0.29103107344632767,-3.585517945697556,0.03714082383617849,Original +1044,0.559322033898305,0.48128531073446335,-3.3967856291561485,0.04256178912971218,Original +1045,0.423728813559322,0.3377824858757062,-3.3339462897065575,0.04458791920005854,Original +1046,0.4666666666666667,0.34745762711864403,-3.119513763428299,0.052491375678345144,Original +1047,0.4915254237288136,0.3121468926553672,-9.141660034508423,0.00276691986404339,Original +1048,0.38333333333333336,0.3305084745762712,-10.796450033846,0.0016997103571064131,Original +1049,0.4576271186440678,0.3672316384180791,-2.274140974459845,0.10751386463711397,Original +1050,0.4745762711864407,0.4007768361581921,-6.242283371885267,0.008292858606378618,Original +1051,0.45,0.37711864406779666,-8.982407047314,0.0029123702976661856,Original +1052,0.43333333333333335,0.288135593220339,-5.089358370900108,0.014661823332742536,Original +1053,0.423728813559322,0.3121468926553672,-3.4347826086956523,0.04139322928919692,Original +1054,0.4067796610169492,0.2641242937853107,-11.821155866809187,0.007080250048172652,Original +1055,0.4915254237288136,0.40084745762711865,-8.576023546581505,0.003332372210409887,Original +1056,0.4067796610169492,0.3121468926553672,-4.822765530016256,0.016987841571810883,Original +1057,0.423728813559322,0.3081214689265537,-10.058098809188749,0.002092572636159546,Original +1058,0.3898305084745763,0.32561205273069677,-3.1284403669724816,0.08877909183169339,Original +1059,0.4915254237288136,0.2994350282485876,-5.277823574803065,0.013263065803865479,Original 
+1060,0.4067796610169492,0.2915960451977401,-2.792542124181373,0.06827442570311215,Original +1061,0.4067796610169492,0.29237288135593215,-3.5351298218230793,0.07153794820044,Original +1062,0.4576271186440678,0.379590395480226,-2.6941204566469295,0.07415454501482839,Original +1063,0.423728813559322,0.3334039548022599,-3.67018433388411,0.03499558816381141,Original +1064,0.3898305084745763,0.29971751412429376,-3.1708738954340325,0.050446746960355386,Original +1065,0.5423728813559322,0.35014124293785315,-8.799712020856967,0.003091964525888444,Original +1066,0.4666666666666667,0.3644067796610169,-3.63823689123835,0.03578597061066121,Original +1067,0.45,0.3940677966101695,-2.6400000000000006,0.07765595434454427,Original +1068,0.423728813559322,0.346045197740113,-4.6989671174421845,0.01823173060555818,Original +1069,0.4067796610169492,0.3163841807909604,-9.237604307034024,0.002683909079797024,Original +1070,0.4406779661016949,0.3204802259887005,-6.160684868447694,0.008606983305788924,Original +1071,0.45,0.2838983050847458,-7.452597186167861,0.005001392247010432,Original +1072,0.3898305084745763,0.2701271186440678,-5.680855091846446,0.01080903321709519,Original +1073,0.4067796610169492,0.3165960451977401,-4.368550947552518,0.022184572544257958,Original +1074,0.4406779661016949,0.32055084745762713,-6.009400282160132,0.009231944143766397,Original +1075,0.3728813559322034,0.2641242937853107,-3.6019080768824088,0.06917625132966021,Original +1076,0.4067796610169492,0.3248587570621469,-6.653056282246297,0.006920964466083356,Original +1077,0.4067796610169492,0.34173728813559323,-2.5913248074535873,0.08098113762773655,Original +1078,0.4067796610169492,0.35451977401129947,-2.4666666666666663,0.09033115673485469,Original +1079,0.4067796610169492,0.3712570621468927,-2.1556241033270016,0.12006957075894573,Original +1080,0.4067796610169492,0.3207627118644068,-3.6925370142833875,0.03445581989413703,Original +1081,0.423728813559322,0.3080508474576271,-6.565459858828986,0.007186932185349032,Original +1082,0.423728813559322,0.34625706214689267,-2.534027979956117,0.08512323508130125,Original +1083,0.4915254237288136,0.33340395480225987,-6.185153671923991,0.008511170916897891,Original +1084,0.4745762711864407,0.35861581920903957,-6.26717557251908,0.008200036888372677,Original +1085,0.4576271186440678,0.291454802259887,-4.773212266552967,0.01747207617796873,Original +1086,0.4745762711864407,0.3627824858757063,-4.484419263456089,0.020682260794596567,Original +1087,0.5423728813559322,0.37097457627118646,-5.427900519255732,0.012271547258470204,Original +1088,0.3220338983050847,0.29764595103578156,-1.787227246173595,0.21581013089073905,Original +1089,0.4406779661016949,0.32895480225988705,-7.500747797147324,0.0049095659068835425,Original +1090,0.4745762711864407,0.34173728813559323,-9.414301217197753,0.0025394604898687696,Original +1091,0.2711864406779661,0.22768361581920904,-2.8518518518518525,0.06501008999538796,Original +1092,0.38333333333333336,0.2966101694915254,-4.066314336189686,0.026826320116124475,Original +1093,0.4576271186440678,0.32069209039548024,-5.480791700836992,0.011945272816605556,Original +1094,0.3728813559322034,0.3080508474576271,-3.679543437365697,0.034768279257972504,Original +1095,0.3898305084745763,0.29096045197740117,-3.1558437213360135,0.05103452997518892,Original +1096,0.423728813559322,0.3584745762711864,-3.746821857674737,0.0331886297757254,Original +1097,0.4745762711864407,0.3586158192090395,-15.62463762709096,0.0005697357387431864,Original 
+1098,0.3898305084745763,0.278954802259887,-2.9455339690026325,0.06024089377159606,Original +1099,0.4067796610169492,0.32492937853107345,-3.107327943782055,0.05299174949102144,Original +1100,0.423728813559322,0.35444915254237286,-4.977643693767403,0.01558261256489159,Original +1101,0.3728813559322034,0.314406779661017,-1.5149901793251697,0.2689983409780041,Original +1102,0.4576271186440678,0.417725988700565,-3.7729601826121053,0.03259975536350789,Original +1103,0.3050847457627119,0.253319209039548,-2.5734510578167744,0.0822460968971868,Original +1104,0.3220338983050847,0.2942090395480226,-2.558441558441555,0.2372076297039761,Original +1105,0.4915254237288136,0.27394067796610166,-7.9213555129905915,0.004194695908330953,Original +1106,0.3559322033898305,0.325,-3.3471216807729713,0.04415312534526643,Original +1107,0.4745762711864407,0.40918079096045196,-3.9630593303183272,0.028696120256434428,Original +1108,0.4576271186440678,0.2911016949152542,-3.4316055779260313,0.0414893603948339,Original +1109,0.559322033898305,0.400635593220339,-8.675158396579372,0.0032228830687179013,Original +1110,0.423728813559322,0.3427495291902072,-3.250494467879352,0.08302849336823244,Original +1111,0.4745762711864407,0.38008474576271184,-3.4264466597407415,0.04164606550795528,Original +1112,0.4915254237288136,0.320409604519774,-6.489451756285192,0.007428710958179485,Original +1113,0.4067796610169492,0.36278248587570616,-4.518646959031556,0.02026382990059579,Original +1114,0.559322033898305,0.38806497175141247,-9.914490233494313,0.002182622087591197,Original +1115,0.38333333333333336,0.3135593220338983,-6.377512576754887,0.007804638131363213,Original +1116,0.3559322033898305,0.2616525423728814,-8.372415728614014,0.0035731576383694914,Original +1117,0.423728813559322,0.3500706214689266,-2.6979742584262723,0.07391274673643934,Original +1118,0.45,0.3686440677966102,-7.629306376694646,0.004675053414430021,Original +1119,0.45,0.3728813559322034,-2.1448905695991947,0.12129141727935283,Original +1120,0.4406779661016949,0.3162429378531073,-3.2177938896146965,0.04866601725501004,Original +1121,0.4406779661016949,0.2699152542372881,-6.495776752201798,0.007408187085051301,Original +1122,0.45,0.3432203389830508,-3.505866051946874,0.039314608332933376,Original +1123,0.423728813559322,0.3168785310734463,-3.110976497704088,0.052841304640431436,Original +1124,0.4576271186440678,0.3041666666666667,-4.0423517251725425,0.027245933023793766,Original +1125,0.3898305084745763,0.32627118644067793,-1.6531163063339527,0.24012067051362174,Original +1126,0.3898305084745763,0.325894538606403,-9.506969760375863,0.01088379244125519,Original +1127,0.4406779661016949,0.3924435028248588,-4.386052107630403,0.021948799914748462,Original +1128,0.3050847457627119,0.2489406779661017,-3.2234880893826983,0.04845534939455295,Original +1129,0.36666666666666664,0.2966101694915254,-6.403332465729597,0.00771574670699146,Original +1130,0.4067796610169492,0.3416666666666667,-4.837646025991297,0.016845843964025876,Original +1131,0.4576271186440678,0.3288841807909605,-6.129713906579408,0.0087302972100172,Original +1132,0.4915254237288136,0.3926553672316384,-3.4156502553198673,0.04197645602545185,Original +1133,0.45,0.3771186440677966,-3.6395833778303657,0.03575220047050311,Original +1134,0.3898305084745763,0.24879943502824858,-5.13286206856859,0.014322469145621968,Original +1135,0.3559322033898305,0.25838041431261766,-2.478383793597629,0.13145422697640002,Original +1136,0.4067796610169492,0.3209745762711865,-3.005502067896227,0.05741679214585345,Original 
+1137,0.3728813559322034,0.29103107344632767,-3.1775375076563694,0.05018888898927705,Original +1138,0.4406779661016949,0.38411016949152543,-2.4611692476881775,0.09077318658872167,Original +1139,0.4067796610169492,0.3458333333333333,-4.644284279214491,0.018818859918736594,Original +1140,0.423728813559322,0.32055084745762713,-4.426845916974451,0.021411691624411183,Original +1141,0.4576271186440678,0.37153954802259886,-3.1115472159109827,0.052817820285826836,Original +1142,0.4576271186440678,0.4094632768361582,-3.037748906538044,0.055966806959142974,Original +1143,0.4576271186440678,0.40494350282485875,-3.713511479606386,0.03395898358756934,Original +1144,0.5,0.36864406779661013,-5.628616053819864,0.011091567639778466,Original +1145,0.4406779661016949,0.3547316384180791,-2.9144455089837993,0.061773504814927487,Original +1146,0.4406779661016949,0.3204802259887006,-7.80733944954129,0.0043741006479608106,Original +1147,0.5,0.3813559322033898,-3.1047293380092054,0.05309922984140076,Original +1148,0.4067796610169492,0.35444915254237286,-2.8491361177963865,0.06515525258058204,Original +1149,0.4067796610169492,0.26560734463276836,-6.047006896155811,0.009071170018542232,Original +1150,0.423728813559322,0.35000000000000003,-2.2152876697379726,0.11354046971526367,Original +1151,0.45,0.3347457627118644,-4.554467869037332,0.019837599235630846,Original +1152,0.3728813559322034,0.34180790960451973,-6.35085296108589,0.007897838244939661,Original +1153,0.4,0.33050847457627114,-2.733333333333336,0.07173922515901705,Original +1154,0.423728813559322,0.346045197740113,-13.339459376998303,0.0009106232895004249,Original +1155,0.3898305084745763,0.2702683615819209,-5.290392517155741,0.013176097120409095,Original +1156,0.4745762711864407,0.3712570621468927,-4.1945951901363205,0.024716200055892694,Original +1157,0.4745762711864407,0.3499293785310735,-3.9622923434347794,0.02871062689188144,Original +1158,0.36666666666666664,0.3305084745762712,-3.3049457887636606,0.045564249549415244,Original +1159,0.4,0.326271186440678,-5.094198380791346,0.014623553591468103,Original +1160,0.4406779661016949,0.3377824858757062,-5.297341314999525,0.013128335633175506,Original +1161,0.4576271186440678,0.3799435028248587,-4.13405315401556,0.02568432777343715,Original +1162,0.423728813559322,0.34625706214689267,-3.451648730864812,0.040887609276162,Original +1163,0.3559322033898305,0.25727401129943506,-3.278863962441917,0.0464655758073765,Original +1164,0.43333333333333335,0.34745762711864403,-4.256851455445149,0.02376922627056341,Original +1165,0.3728813559322034,0.3163841807909604,-3.333333333333336,0.044608278994442765,Original +1166,0.3559322033898305,0.30783898305084745,-3.2916992909941314,0.04601922125686048,Original +1167,0.423728813559322,0.3500706214689266,-3.1298749223282756,0.052070602215977944,Original +1168,0.2711864406779661,0.21920903954802262,-2.9631204814438217,0.09751860335760698,Original +1169,0.423728813559322,0.3543785310734463,-6.117934775079367,0.008777805575829042,Original +1170,0.3728813559322034,0.35021186440677965,-5.6736454811631605,0.010847461425876656,Original +1171,0.423728813559322,0.3694915254237288,-1.777777777777776,0.32619726158657,Original +1172,0.4406779661016949,0.3669491525423729,-3.4506787823167455,0.040916473140808284,Original +1173,0.4,0.2923728813559322,-6.160404875922859,0.00860808784608318,Original +1174,0.5084745762711864,0.32090395480225986,-7.2901037265115765,0.0053285197621824095,Original +1175,0.3898305084745763,0.286864406779661,-3.6376865077145064,0.03579978607225466,Original 
+1176,0.4406779661016949,0.39237288135593223,-3.433070549496972,0.04144499780826165,Original +1177,0.4067796610169492,0.3372881355932204,-3.3003043252568145,0.08083454343693727,Original +1178,0.3389830508474576,0.2921845574387947,-3.052528420785673,0.09264712636043493,Original +1179,0.4406779661016949,0.37966101694915255,-4.431293675255974,0.02135416398761436,Original +1180,0.4067796610169492,0.3416666666666667,-3.3728798057819063,0.043318525364938,Original +1181,0.3559322033898305,0.295409604519774,-5.351131248404091,0.012766161878428656,Original +1182,0.4406779661016949,0.3373587570621469,-4.004253706903789,0.02793063892749739,Original +1183,0.4067796610169492,0.3082627118644068,-3.4319134544954104,0.04148003219890615,Original +1184,0.423728813559322,0.34187853107344635,-4.233170395077674,0.024123819842227313,Original +1185,0.4915254237288136,0.3927966101694915,-2.484400110285176,0.08892311315035355,Original +1186,0.4745762711864407,0.3501412429378531,-6.005437958250537,0.009249100739188856,Original +1187,0.5254237288135594,0.3246468926553672,-8.378851501317461,0.003565200635279848,Original +1188,0.4915254237288136,0.3753531073446328,-3.92529890966906,0.029421652663864435,Original +1189,0.4576271186440678,0.3626412429378531,-4.119009202618319,0.02593245296220258,Original +1190,0.43333333333333335,0.326271186440678,-2.5654413105709444,0.08282085362511153,Original +1191,0.2711864406779661,0.2235169491525424,-2.8217002208427564,0.06664468710770881,Original +1192,0.559322033898305,0.4976694915254237,-2.725345181640537,0.0722232481118407,Original +1193,0.4067796610169492,0.3484934086629002,-3.5009766627567247,0.07279105093612302,Original +1194,0.3389830508474576,0.3038135593220339,-4.999032554530682,0.015400604884304166,Original +1195,0.3333333333333333,0.30508474576271183,-4.082482904638637,0.02654788546719936,Original +1196,0.4406779661016949,0.3290960451977401,-4.253217026259508,0.023823207735453863,Original +1197,0.3728813559322034,0.3121468926553672,-3.9090909090909123,0.02974030553139564,Original +1198,0.3728813559322034,0.3165960451977401,-3.096482698963895,0.05344214083896168,Original +1199,0.423728813559322,0.3545197740112994,-6.4902085496187105,0.007426251317674435,Original +1200,0.4745762711864407,0.3628531073446328,-2.838043708162847,0.06575238490643358,Original +1201,0.4576271186440678,0.3163841807909605,-4.003203845127177,0.027949819151750765,Original +1202,0.3898305084745763,0.33354519774011293,-2.8433656242933654,0.06546504459353619,Original +1203,0.423728813559322,0.37966101694915255,-4.5490523794544675,0.01990128684164212,Original +1204,0.3728813559322034,0.30783898305084745,-2.3170196256606355,0.10336527760682507,Original +1205,0.4915254237288136,0.3293079096045198,-7.7130494574412145,0.004530195336094142,Original +1206,0.3050847457627119,0.22344632768361583,-3.080575323621966,0.05411151529897513,Original +1207,0.3898305084745763,0.30357815442561203,-3.206327062041345,0.08504610921846024,Original +1208,0.3728813559322034,0.32902542372881355,-4.458405259144846,0.02100779526554658,Original +1209,0.2542372881355932,0.14752824858757063,-6.30893579014757,0.008047357424001828,Original +1210,0.423728813559322,0.35021186440677965,-2.551953540367511,0.08379991236656559,Original +1211,0.3898305084745763,0.3418079096045198,-4.389381125701737,0.02190431958759874,Original +1212,0.288135593220339,0.21532485875706214,-4.8493624334243925,0.016735124561985816,Original +1213,0.3898305084745763,0.2995056497175141,-5.3217343632850165,0.012962453260928666,Original 
+1214,0.45,0.3432203389830508,-2.896998294604734,0.06265501679556046,Original +1215,0.2711864406779661,0.21532485875706214,-3.1169524568281517,0.05259605209854495,Original +1216,0.4067796610169492,0.3333333333333333,-3.022438607339303,0.05664943058417436,Original +1217,0.4406779661016949,0.26984463276836157,-4.292077854283091,0.023254116236581038,Original +1218,0.4067796610169492,0.30388418079096047,-3.079787687349886,0.054144932145154395,Original +1219,0.3728813559322034,0.32561205273069677,-1.3195730536286363,0.31778111693939937,Original +1220,0.3220338983050847,0.2699152542372881,-2.3318495798744,0.10197574453300426,Original +1221,0.4666666666666667,0.3389830508474576,-3.119096328227442,0.05250841765245064,Original +1222,0.3220338983050847,0.1352401129943503,-8.430881102204697,0.003501725063291807,Original +1223,0.3050847457627119,0.2489406779661017,-7.020021492444736,0.005937531487381006,Original +1224,0.4576271186440678,0.328954802259887,-3.1151717210383336,0.05266898293338368,Original +1225,0.4576271186440678,0.3122881355932204,-3.467950511615431,0.04040635351163493,Original +1226,0.4067796610169492,0.3260828625235405,-3.6801457918777314,0.0665501469345416,Original +1227,0.423728813559322,0.354590395480226,-5.215051241964461,0.01370881147200009,Original +1228,0.3728813559322034,0.3038135593220339,-9.817377185403636,0.002246445968285984,Original +1229,0.3220338983050847,0.26596045197740115,-2.465458081419365,0.0904281089259112,Original +1230,0.5084745762711864,0.36264124293785316,-5.251461666642595,0.0134479218636803,Original +1231,0.3728813559322034,0.27443502824858756,-3.458769578971492,0.04067649752950825,Original +1232,0.4406779661016949,0.34173728813559323,-3.108570507260323,0.052940452969860205,Original +1233,0.45,0.3220338983050847,-3.55910413197229,0.037844293787820955,Original +1234,0.3559322033898305,0.31652542372881354,-2.9875904328450478,0.058242586475556184,Original +1235,0.4745762711864407,0.4096045197740113,-2.5245779797628782,0.0858312019877726,Original +1236,0.3389830508474576,0.3089453860640301,-2.8553936637391883,0.10388662348248497,Original +1237,0.3898305084745763,0.3458333333333333,-2.306948594944211,0.10432200362127822,Original +1238,0.3898305084745763,0.3038135593220339,-7.137099922436499,0.005662825302674034,Original +1239,0.3728813559322034,0.18559322033898304,-10.368238367141146,0.0019142559726229866,Original +1240,0.3728813559322034,0.2786016949152542,-6.156258728408069,0.008624465754537623,Original +1241,0.3728813559322034,0.2956920903954802,-2.834146743848625,0.06596378750306289,Original +1242,0.4067796610169492,0.34187853107344635,-3.891797498256559,0.030085204803105746,Original +1243,0.38333333333333336,0.3389830508474576,-2.266099806569284,0.1083141230940181,Original +1244,0.3220338983050847,0.25741525423728817,-2.4557675307968445,0.09121010163141306,Original +1245,0.5254237288135594,0.39258474576271185,-3.5522131403755797,0.03803061001063327,Original +1246,0.4406779661016949,0.31207627118644066,-5.440282569102571,0.012194130448693096,Original +1247,0.36666666666666664,0.32627118644067793,-3.788162541206027,0.032263411126476765,Original +1248,0.38333333333333336,0.34745762711864403,-3.2791258997889505,0.046456412128927795,Original +1249,0.3220338983050847,0.25741525423728817,-5.952990444986046,0.009480187104661204,Original +1250,0.3389830508474576,0.3258003766478343,-3.4999999999999902,0.07282735005446969,Original +1251,0.3559322033898305,0.2783898305084746,-3.9414887073599294,0.029107725320715257,Original 
+1252,0.4,0.364406779661017,-2.193378465041791,0.1158873271135981,Original +1253,0.3389830508474576,0.28255649717514125,-3.1504339973019846,0.05124820108543329,Original +1254,0.4745762711864407,0.41377118644067795,-2.677569682825962,0.07520419613663175,Original +1255,0.3559322033898305,0.24491525423728813,-6.057634803744621,0.00902640078253155,Original +1256,0.4576271186440678,0.29117231638418084,-4.3575806009385145,0.022334035979091265,Original +1257,0.3898305084745763,0.32916666666666666,-10.509267049042842,0.001839820961868861,Original +1258,0.38333333333333336,0.2966101694915254,-4.298859857965993,0.023156605272933835,Original +1259,0.423728813559322,0.2870056497175142,-17.37458743854924,0.0004155011309011471,Original +1260,0.3050847457627119,0.2303201506591337,-5.066892497031914,0.0368133974917626,Original +1261,0.4576271186440678,0.2956920903954802,-5.945744266829731,0.009512707270210679,Original +1262,0.3220338983050847,0.2750470809792844,-2.0432809561528478,0.1777389669818575,Original +1263,0.4576271186440678,0.3129943502824859,-2.399133282225983,0.09594924200933898,Original +1264,0.4166666666666667,0.3050847457627119,-4.8621138936836426,0.016615697449670994,Original +1265,0.3389830508474576,0.2699858757062147,-3.0417250416423958,0.055791220683953764,Original +1266,0.3898305084745763,0.30348399246704333,-4.849951400603131,0.039981081861906576,Original +1267,0.4,0.3093220338983051,-4.068509688367149,0.02678829448291478,Original +1268,0.423728813559322,0.2828389830508474,-12.090909090909088,0.0012175905817395098,Original +1269,0.4067796610169492,0.28276836158192087,-4.398585621119437,0.021781940473571012,Original +1270,0.3559322033898305,0.2784604519774011,-3.6540488693517794,0.03539195989892865,Original +1271,0.35,0.2754237288135593,-3.195601372491276,0.049498206131116385,Original +1272,0.3898305084745763,0.30357815442561203,-3.206327062041345,0.08504610921846024,Original +1273,0.4,0.3347457627118644,-2.6807959017177474,0.07499815239256642,Original +1274,0.4406779661016949,0.35430790960451974,-2.9187879140660207,0.06155652231610408,Original +1275,0.3220338983050847,0.24901129943502825,-2.864760573339222,0.06432558093621543,Original +1276,0.5423728813559322,0.3375,-8.30959245705665,0.0036520779681317537,Original +1277,0.288135593220339,0.22351694915254236,-3.3098123366183496,0.04539853318110197,Original +1278,0.4067796610169492,0.3161723163841808,-2.483483730018013,0.0889952126349582,Original +1279,0.31666666666666665,0.2754237288135593,-3.8676344826299247,0.030575765124777585,Original +1280,0.35,0.2966101694915254,-3.6373066958946425,0.03580932390070352,Original +1281,0.3,0.2584745762711864,-2.869146214468688,0.06409507369957781,Original +1282,0.4067796610169492,0.3423728813559322,-2.203747318127119,0.15839077808830893,Original +1283,0.288135593220339,0.2278954802259887,-2.1351104338488116,0.12241769751046185,Original +1284,0.3389830508474576,0.28269774011299437,-3.521966119583988,0.03886239529216442,Original +1285,0.23728813559322035,0.19077212806026367,-1.9859386750277492,0.1854304501080007,Original +1286,0.2542372881355932,0.21942090395480224,-37.92307692307706,4.033425013157966e-05,Original +1287,0.3728813559322034,0.2909604519774011,-2.6148419405355545,0.07935310501190229,Original +1288,0.4166666666666667,0.31779661016949157,-6.831300510639733,0.0064184892478205335,Original +1289,0.3898305084745763,0.23241525423728815,-4.29193057069012,0.02325623974857282,Original +1290,0.3898305084745763,0.3377118644067797,-3.1733791192340908,0.0503496079687739,Original 
+1291,0.3220338983050847,0.2364406779661017,-3.377315745886016,0.04317682241293437,Original +1292,0.5254237288135594,0.3247175141242937,-5.643467443294131,0.011010270079289456,Original +1293,0.4406779661016949,0.34611581920903955,-7.873610453398626,0.004268617658520372,Original +1294,0.423728813559322,0.3162429378531073,-2.825094808781018,0.06645811724840166,Original +1295,0.3389830508474576,0.26617231638418076,-2.337868480725624,0.10141825639016255,Original +1296,0.2542372881355932,0.18975988700564972,-3.2917415848871014,0.04601775946765137,Original +1297,0.3389830508474576,0.25741525423728817,-3.8324745624304253,0.03130808733647537,Original +1298,0.576271186440678,0.32902542372881355,-9.542627417316496,0.0024409490317057526,Original +1299,0.3389830508474576,0.27860169491525427,-2.340709452706567,0.10115640234950059,Original +1300,0.3220338983050847,0.27005649717514124,-7.413571269033491,0.005077486364715219,Original +1301,0.3559322033898305,0.30798022598870056,-3.889154254622231,0.030138373467386664,Original +1302,0.3728813559322034,0.30254237288135594,-27.6666666666665,0.023000340589345674,Original +1303,0.3728813559322034,0.27464689265536724,-3.0029751452311446,0.05753239842963967,Original +1304,0.2711864406779661,0.24463276836158193,-2.631494801492514,0.07822464952325936,Original +1305,0.43333333333333335,0.3432203389830508,-2.8168388126916613,0.06691301311594693,Original +1306,0.45,0.31779661016949157,-5.039254603264428,0.015065771599115848,Original +1307,0.3389830508474576,0.2701271186440678,-3.9729784680823,0.028509358260687255,Original +1308,0.4576271186440678,0.2951977401129944,-6.340155233829669,0.00793564788180792,Original +1309,0.3728813559322034,0.2615819209039548,-3.256754496379134,0.04724737989320142,Original +1310,0.3728813559322034,0.2699152542372881,-5.1240286090443545,0.014390539846078056,Original +1311,0.4406779661016949,0.37563559322033896,-2.224345699469886,0.11258691448891019,Original +1312,0.3728813559322034,0.29783427495291903,-3.5957489244818297,0.06938932086375887,Original +1313,0.23728813559322035,0.18568738229755177,-1.9376663567761192,0.19225685599558717,Original +1314,0.3333333333333333,0.2796610169491526,-3.6565517048676255,0.035330102302749224,Original +1315,0.26666666666666666,0.1906779661016949,-2.7138510050026126,0.07292684266802282,Original +1316,0.4745762711864407,0.37570621468926557,-2.8482596056990577,0.06520219060265608,Original +1317,0.3728813559322034,0.29103107344632767,-4.887164066279016,0.016384294164034276,Original +1318,0.23333333333333334,0.1483050847457627,-2.866666666666667,0.06422527059785652,Original +1319,0.3728813559322034,0.29209039548022603,-7.654614504282301,0.016642001057945596,Original +1320,0.423728813559322,0.2913135593220339,-4.79001040774592,0.017305937126765883,Original +1321,0.4406779661016949,0.28700564971751413,-6.922217740406311,0.006180622722235997,Original +1322,0.4745762711864407,0.3292372881355933,-6.337740226443206,0.007944216283270866,Original +1323,0.2711864406779661,0.2192090395480226,-2.472975320624164,0.08982713951273004,Original +1324,0.4406779661016949,0.33319209039548026,-6.046351066103405,0.00907394218732086,Original +1325,0.3728813559322034,0.269774011299435,-2.6462479326196116,0.0772414424024133,Original +1326,0.423728813559322,0.30798022598870056,-6.24495382880618,0.008282834737265843,Original +1327,0.3898305084745763,0.29936440677966103,-4.542638876598001,0.019977054761155088,Original +1328,0.4666666666666667,0.3305084745762712,-3.092031441659996,0.05362839188122225,Original 
+1329,0.3389830508474576,0.2662429378531074,-2.0881715296001104,0.12800003155007977,Original +1330,0.423728813559322,0.29936440677966103,-2.5346106753467175,0.08507981646292648,Original +1331,0.3389830508474576,0.2617231638418079,-4.608368949689674,0.019217889743118922,Original +1332,0.4576271186440678,0.3415960451977401,-3.626931567328918,0.036071124625796494,Original +1333,0.4915254237288136,0.38022598870056495,-2.535033549783168,0.08504832368725805,Original +1334,0.423728813559322,0.3209039548022599,-3.4248301792021634,0.04169532187000911,Original +1335,0.4067796610169492,0.3545197740112994,-4.1111111111111125,0.02606395678518873,Original +1336,0.5254237288135594,0.32457627118644067,-6.742138664692472,0.006663608853032706,Original +1337,0.36666666666666664,0.288135593220339,-2.6750562472452653,0.07536520355046247,Original +1338,0.4406779661016949,0.3627118644067797,-3.6442193578369366,0.03563624031042606,Original +1339,0.4067796610169492,0.28241525423728814,-3.0409065774469273,0.0558273074325418,Original +1340,0.3050847457627119,0.2319915254237288,-6.013276450562215,0.009215200973280897,Original +1341,0.4067796610169492,0.34576271186440677,-2.4308053376742573,0.09326286520252099,Original +1342,0.4745762711864407,0.40091807909604515,-2.985011716688378,0.05836269929186945,Original +1343,0.288135593220339,0.2192090395480226,-4.357142857142859,0.02234002693058057,Original +1344,0.288135593220339,0.24456214689265537,-2.609888745022117,0.07969262184669647,Original +1345,0.5254237288135594,0.47238700564971753,-1.8841000222390059,0.15606129783472963,Original +1346,0.4406779661016949,0.3461864406779661,-5.059912204313382,0.014897489104880094,Original +1347,0.4666666666666667,0.3093220338983051,-3.3757575757575755,0.04322652987968803,Original +1348,0.4576271186440678,0.3797316384180791,-2.9703710359097775,0.05905056214606797,Original +1349,0.3559322033898305,0.2362523540489642,-3.955281402843921,0.05837978400303097,Original +1350,0.3389830508474576,0.29764595103578156,-4.346534653465347,0.04906822409381993,Original +1351,0.3559322033898305,0.2911723163841808,-4.423761578539075,0.021451702572010452,Original +1352,0.3728813559322034,0.27838983050847455,-3.7753252988406856,0.03254713415412944,Original +1353,0.3898305084745763,0.3586158192090395,-2.541573388991179,0.08456310051618006,Original +1354,0.3220338983050847,0.2318502824858757,-2.7486274334487737,0.07082370041149343,Original +1355,0.3559322033898305,0.29936440677966103,-2.1631195885543937,0.11922505386124203,Original +1356,0.45,0.30508474576271183,-12.091525958289973,0.0012174072403681394,Original +1357,0.3728813559322034,0.29103107344632767,-3.7674177162295606,0.03272349692653532,Original +1358,0.423728813559322,0.35861581920903957,-3.110819906677478,0.05284775046942044,Original +1359,0.3220338983050847,0.2809792843691149,-5.736842105263147,0.02906640150529952,Original +1360,0.3050847457627119,0.2526365348399247,-3.4171779141104293,0.07600312612521545,Original +1361,0.4166666666666667,0.3220338983050848,-3.53121005385469,0.03860574928821704,Original +1362,0.3559322033898305,0.314406779661017,-1.800059978008079,0.2136563487360759,Original +1363,0.4406779661016949,0.3840395480225989,-2.2968348396222584,0.10529360084626542,Original +1364,0.3050847457627119,0.2153954802259887,-4.628137283451674,0.01899691387142354,Original +1365,0.3389830508474576,0.2701271186440678,-2.5462474269165756,0.08421839435807502,Original +1366,0.36666666666666664,0.288135593220339,-3.421943449225448,0.04178346889141796,Original 
+1367,0.3559322033898305,0.29992937853107343,-1.9390607807005154,0.1478476458083794,Original +1368,0.3559322033898305,0.26935028248587567,-1.643431635388741,0.34799773853532184,Original +1369,0.3220338983050847,0.22761299435028248,-4.4183200488016485,0.021522528086765566,Original +1370,0.423728813559322,0.2528248587570622,-5.471809142918229,0.011999877727690598,Original +1371,0.4166666666666667,0.3093220338983051,-13.229901852633054,0.0009331248455100845,Original +1372,0.4166666666666667,0.3474576271186441,-4.715027198381945,0.018063824737018258,Original +1373,0.3050847457627119,0.2699858757062147,-5.869851287372005,0.009862263796097724,Original +1374,0.36666666666666664,0.2923728813559322,-5.133234383709284,0.014319609315535732,Original +1375,0.3728813559322034,0.3040960451977401,-2.8752211601887643,0.06377747883646023,Original +1376,0.3728813559322034,0.3248587570621469,-1.9896995023342197,0.14072495503830776,Original +1377,0.3559322033898305,0.2953389830508475,-4.1824309711049805,0.024906888121385386,Original +1378,0.3898305084745763,0.27429378531073445,-4.389736277617274,0.021899581152473327,Original +1379,0.4576271186440678,0.30360169491525424,-6.426920856371511,0.007635697442918866,Original +1380,0.3559322033898305,0.2955508474576271,-3.519233773599044,0.03893867159865646,Original +1381,0.423728813559322,0.3082627118644068,-4.022350177849458,0.02760267623485285,Original +1382,0.3898305084745763,0.34821092278719395,-4.510204081632659,0.045808057902316804,Original +1383,0.423728813559322,0.3258003766478343,-4.913538149119954,0.039012347759055314,Original +1384,0.3050847457627119,0.24887005649717514,-3.297939899442541,0.04580416394678644,Original +1385,0.3898305084745763,0.29209039548022603,-9.26047768699887,0.011460849730983888,Original +1386,0.3898305084745763,0.3250706214689265,-4.167794334654786,0.025138858812902672,Original +1387,0.4576271186440678,0.3121468926553672,-3.4545091251635194,0.04080264000619044,Original +1388,0.45,0.385593220338983,-5.066666666666667,0.014842997288171941,Original +1389,0.3559322033898305,0.2530367231638418,-3.0699274770848146,0.05456547495842803,Original +1390,0.3050847457627119,0.2402542372881356,-2.253658340717321,0.10956651809067314,Original +1391,0.4,0.3220338983050848,-2.732807218615621,0.07177097972724268,Original +1392,0.4067796610169492,0.32069209039548024,-7.1384266143873925,0.005659809403554773,Original +1393,0.3050847457627119,0.24152542372881355,-5.9603956067927,0.027012812355076195,Original +1394,0.3898305084745763,0.29145480225988696,-3.4168337494506282,0.04194007659421182,Original +1395,0.1864406779661017,0.13495762711864406,-2.257918405893237,0.10913572969388878,Original +1396,0.3050847457627119,0.23644067796610171,-3.233110040049279,0.04810198076292131,Original +1397,0.3559322033898305,0.27429378531073445,-2.907350507765195,0.062130092881679595,Original +1398,0.288135593220339,0.24053672316384178,-2.365917408343781,0.0988685692030821,Original +1399,0.3559322033898305,0.24915254237288134,-3.4558397655933613,0.04076318933119888,Original +1400,0.423728813559322,0.34611581920903955,-2.894903284380277,0.06276192114601883,Original +1401,0.3898305084745763,0.34201977401129946,-2.394619302503736,0.09633975086570609,Original +1402,0.3559322033898305,0.27853107344632766,-5.181815900134804,0.013952747029258906,Original +1403,0.43333333333333335,0.3686440677966102,-4.46962233410428,0.020866618588713408,Original +1404,0.423728813559322,0.3754943502824859,-3.4249037287283453,0.041693079107582236,Original 
+1405,0.4915254237288136,0.3121468926553672,-9.141660034508423,0.00276691986404339,Original +1406,0.4745762711864407,0.3968926553672316,-2.528902694294367,0.08550631275341908,Original +1407,0.4,0.25,-14.066533632030769,0.0007781509499729613,Original +1408,0.423728813559322,0.3584039548022599,-3.1685329501250488,0.050537730061605154,Original +1409,0.4745762711864407,0.38848870056497176,-3.0811823304553716,0.05408577968425568,Original +1410,0.4745762711864407,0.40529661016949153,-3.1510846069279967,0.05122244392981766,Original +1411,0.3728813559322034,0.2995762711864407,-6.934810075795833,0.006148597978661057,Original +1412,0.3728813559322034,0.34180790960451973,-6.35085296108589,0.007897838244939661,Original +1413,0.5084745762711864,0.34209039548022596,-5.7423119859693115,0.010488586403531558,Original +1414,0.5166666666666667,0.3686440677966102,-6.342784542369049,0.007926332946572029,Original +1415,0.4745762711864407,0.3,-4.012310343727843,0.027784012310922687,Original +1416,0.4915254237288136,0.4219632768361582,-9.741023075752745,0.0022983741002138604,Original +1417,0.5254237288135594,0.4518361581920904,-2.3115409557535824,0.10388441475596431,Original +1418,0.3728813559322034,0.31257062146892656,-2.208880071004421,0.11422087778664086,Original +1419,0.3898305084745763,0.3671610169491525,-3.9629629629629703,0.02869794241127541,Original +1420,0.4067796610169492,0.3545197740112994,-4.900769721140663,0.01626036695364968,Original +1421,0.48333333333333334,0.4194915254237288,-2.0960998088359606,0.12703618205803208,Original +1422,0.4745762711864407,0.4010593220338983,-4.205211154248234,0.024551320079546985,Original +1423,0.5084745762711864,0.4175141242937853,-2.3260164970888964,0.10251957254301898,Original +1424,0.4915254237288136,0.3879237288135593,-2.9208912467506574,0.06145176525841497,Original +1425,0.45,0.41101694915254233,-2.231327750334267,0.11185847276008758,Original +1426,0.3728813559322034,0.2995056497175141,-2.210333134870083,0.11406615150921069,Original +1427,0.3559322033898305,0.29774011299435027,-3.952777215734684,0.05844741686027941,Original +1428,0.4,0.3432203389830508,-3.021617032571375,0.05668635618881769,Original +1429,0.4067796610169492,0.3079802259887006,-34.12195121951229,5.5338575696591484e-05,Original +1430,0.4666666666666667,0.4110169491525424,-6.858659644654502,0.006345660358547117,Original +1431,0.4067796610169492,0.33799435028248587,-1.8646963468092161,0.15908733357598287,Original +1432,0.4406779661016949,0.39887005649717516,-2.8461538461538454,0.1044600519927322,Original +1433,0.5254237288135594,0.3629237288135593,-10.808536082221455,0.0016941290870239134,Original +1434,0.423728813559322,0.3291666666666667,-5.281280870229153,0.01323906907450259,Original +1435,0.4067796610169492,0.2950564971751412,-2.5561253487432714,0.08349557275673589,Original +1436,0.4576271186440678,0.37132768361581925,-12.168997094322684,0.0011946701728581847,Original +1437,0.4745762711864407,0.3586158192090395,-3.8295559807933683,0.03136988366637992,Original +1438,0.4576271186440678,0.3669491525423728,-5.570197507360285,0.01141908670501291,Original +1439,0.3559322033898305,0.30819209039548023,-2.607416226140155,0.07986277130041312,Original +1440,0.48333333333333334,0.3813559322033898,-4.548172491877701,0.019911659585532055,Original +1441,0.3559322033898305,0.29124293785310734,-3.602392726796967,0.036700127748095945,Original +1442,0.4915254237288136,0.3882062146892656,-7.414941042518773,0.00507478976632389,Original 
+1443,0.3728813559322034,0.27888418079096045,-2.9282749062301643,0.06108578193839565,Original +1444,0.4745762711864407,0.3502824858757062,-6.487446070815471,0.007435234784132052,Original +1445,0.4915254237288136,0.4301553672316384,-2.6869021956079524,0.07461008165667392,Original +1446,0.43333333333333335,0.3220338983050848,-2.888948165920659,0.06306704716766874,Original +1447,0.4067796610169492,0.3163841807909605,-13.063945294843638,0.0009686387721898685,Original +1448,0.423728813559322,0.2754237288135593,-11.666666666666663,0.007266951354550622,Original +1449,0.5084745762711864,0.41779661016949154,-2.9499240992842286,0.060028321215766844,Original +1450,0.4,0.364406779661017,-2.193378465041791,0.1158873271135981,Original +1451,0.45,0.364406779661017,-3.2485009805585183,0.04754349406047413,Original +1452,0.4406779661016949,0.3038841807909604,-7.310266791966458,0.0052864231861055435,Original +1453,0.3898305084745763,0.303954802259887,-5.811865258054232,0.010140767780751686,Original +1454,0.423728813559322,0.3931261770244821,-2.400829779247631,0.1383734677102449,Original +1455,0.4067796610169492,0.3542372881355932,-2.708482575649219,0.0732583731798295,Original +1456,0.3898305084745763,0.3334039548022599,-3.7816368562219433,0.0324072409590771,Original +1457,0.4576271186440678,0.3794491525423729,-2.2802263709416946,0.10691298923861156,Original +1458,0.423728813559322,0.35416666666666663,-2.7464616338694436,0.07095246240968993,Original +1459,0.43333333333333335,0.3728813559322034,-2.118915741970083,0.1243102693621582,Original +1460,0.423728813559322,0.37153954802259886,-2.386591892485171,0.09703900448133006,Original +1461,0.4576271186440678,0.3502824858757062,-8.101627221513192,0.003930361543241993,Original +1462,0.3898305084745763,0.33742937853107347,-3.6447947158122775,0.035621882387353185,Original +1463,0.4915254237288136,0.2951977401129944,-9.61483104773844,0.002387739072267894,Original +1464,0.4067796610169492,0.31207627118644066,-4.941615370705469,0.015895579161855126,Original +1465,0.4745762711864407,0.35903954802259885,-3.4967484138936817,0.03957367708757962,Original +1466,0.4666666666666667,0.36864406779661013,-5.216423434339635,0.013698860156045016,Original +1467,0.43333333333333335,0.3813559322033898,-10.623244953089115,0.0017824441583808043,Original +1468,0.3728813559322034,0.2919020715630885,-2.841519981956887,0.10474932255486497,Original +1469,0.4067796610169492,0.33681732580037665,-2.5438493495451917,0.12598335802475616,Original +1470,0.423728813559322,0.3456920903954802,-3.165877220586101,0.0506411985161682,Original +1471,0.423728813559322,0.29774011299435027,-5.144479879486528,0.03576976981268908,Original +1472,0.6,0.4957627118644068,-4.919999999999999,0.016087280685965905,Original +1473,0.4745762711864407,0.36292372881355933,-2.9112467789048893,0.06193395217425993,Original +1474,0.4067796610169492,0.28721751412429375,-4.6372424549345395,0.018896243004971457,Original +1475,0.3898305084745763,0.3077683615819209,-3.416406029443321,0.04195321965391951,Original +1476,0.4576271186440678,0.3416666666666667,-6.968395313620221,0.006064246319147875,Original +1477,0.4067796610169492,0.325,-2.3275547317374796,0.10237581960629237,Original +1478,0.423728813559322,0.38201506591337103,-2.838985414963086,0.10490802421935423,Original +1479,0.4915254237288136,0.3797316384180791,-6.404709068596953,0.007711044857755044,Original +1480,0.3898305084745763,0.2909604519774011,-3.081578172139684,0.054069005258383575,Original +1481,0.55,0.40254237288135597,-5.266283362867523,0.013343577598251742,Original 
+1482,0.4067796610169492,0.3587570621468927,-3.544745038970269,0.038233853222009985,Original +1483,0.423728813559322,0.3248587570621469,-2.2831482556870473,0.10662592307782301,Original +1484,0.36666666666666664,0.2923728813559322,-4.252457958970371,0.023834502002819766,Original +1485,0.4576271186440678,0.40084745762711865,-3.941176470588236,0.029113738898400077,Original +1486,0.423728813559322,0.32090395480225986,-5.31174648930179,0.013030041853782981,Original +1487,0.3728813559322034,0.3204802259887006,-2.1906690113295393,0.11618158740373133,Original +1488,0.4406779661016949,0.325,-3.9643008779307634,0.028672657850254017,Original +1489,0.4915254237288136,0.3544491525423729,-7.463121733660972,0.00498112898453179,Original +1490,0.5,0.3686440677966102,-4.69122943243946,0.018313350909407743,Original +1491,0.4576271186440678,0.3626412429378531,-2.5444250028895907,0.08435259068095222,Original +1492,0.576271186440678,0.4848870056497175,-2.3523540362420694,0.10009165403065308,Original +1493,0.3559322033898305,0.303954802259887,-3.517707919348613,0.038981350469306286,Original +1494,0.4576271186440678,0.3038135593220339,-4.198379191556001,0.024657266011800877,Original +1495,0.4067796610169492,0.32923728813559316,-5.019825255742889,0.015226316305783647,Original +1496,0.423728813559322,0.33757062146892663,-1.955570219094377,0.18968621213880818,Original +1497,0.5084745762711864,0.4048728813559322,-5.068605007676937,0.014827407662859303,Original +1498,0.3898305084745763,0.3425612052730697,-2.4888108666663245,0.13056092060471672,Original +1499,0.423728813559322,0.37146892655367236,-3.9219921560235247,0.029486306559984257,Original +1500,0.4915254237288136,0.35035310734463276,-3.355609536673555,0.043875869462308204,Original +1501,0.5254237288135594,0.36737288135593216,-5.454903356437624,0.012103536343441582,Original +1502,0.288135593220339,0.2362994350282486,-7.416033307666337,0.005072640837140369,Original +1503,0.3728813559322034,0.3206920903954802,-4.327561335547406,0.022749742422749963,Original +1504,0.3559322033898305,0.29957627118644065,-7.052288308361696,0.005860100906942432,Original +1505,0.4915254237288136,0.3422316384180791,-2.414469415876127,0.09463683116855957,Original +1506,0.4745762711864407,0.354590395480226,-6.260853478619376,0.008223481825561816,Original +1507,0.4745762711864407,0.3882062146892655,-4.699364429896171,0.018227552356258447,Original +1508,0.4406779661016949,0.31624293785310736,-3.384433655570972,0.042950673824966874,Original +1509,0.4406779661016949,0.31235875706214694,-8.330919086612344,0.003625030826573087,Original +1510,0.4406779661016949,0.28700564971751413,-14.665257155667614,0.0006876691247397437,Original +1511,0.3898305084745763,0.3415960451977401,-3.006477663714325,0.05737223684387089,Original +1512,0.3898305084745763,0.31193502824858754,-2.8294238473612916,0.06622113261732844,Original +1513,0.4166666666666667,0.3601694915254237,-3.233808333817773,0.0480764627944047,Original +1514,0.4666666666666667,0.35593220338983056,-4.438525973846813,0.021261047550038326,Original +1515,0.43333333333333335,0.3432203389830508,-2.8168388126916613,0.06691301311594693,Original +1516,0.423728813559322,0.3334745762711865,-3.3752877168731676,0.043241533194103954,Original +1517,0.4576271186440678,0.4176553672316384,-2.608655045302455,0.07977746445984121,Original +1518,0.4406779661016949,0.3330508474576271,-3.8022134355217396,0.031956488835427684,Original +1519,0.3728813559322034,0.26581920903954803,-6.147383047873269,0.008659663805844972,Original 
+1520,0.5254237288135594,0.3882062146892655,-7.465956735313377,0.004975689184389457,Original +1521,0.43333333333333335,0.3728813559322034,-2.4230728530694363,0.09391016538665153,Original +1522,0.3559322033898305,0.31207627118644066,-2.1523998630107477,0.12043504718327508,Original +1523,0.3898305084745763,0.30856873822975517,-2.102946127279898,0.1701873208222798,Original +1524,0.3728813559322034,0.2913135593220339,-4.4679931695295805,0.020887046587591118,Original +1525,0.48333333333333334,0.3686440677966102,-5.413333333333333,0.012363451013336186,Original +1526,0.45,0.3516949152542373,-3.2276227144907716,0.048303103292659674,Original +1527,0.4406779661016949,0.3209745762711864,-3.5295270186824976,0.038652315352543654,Original +1528,0.3898305084745763,0.3119350282485876,-2.259178910141389,0.10900865631385685,Original +1529,0.4166666666666667,0.326271186440678,-5.6348858004839935,0.011057151074446126,Original +1530,0.4067796610169492,0.2870762711864407,-5.605795079572491,0.011218029506615877,Original +1531,0.5423728813559322,0.3502824858757062,-4.93323850074808,0.015969516750107843,Original +1532,0.423728813559322,0.3292372881355932,-1.9315505529107941,0.14893966449897095,Original +1533,0.5254237288135594,0.4470338983050848,-2.370301890762351,0.0984770870147508,Original +1534,0.4576271186440678,0.33340395480225987,-2.965463727554792,0.0592833948188603,Original +1535,0.4,0.3347457627118644,-3.0800000000000014,0.05413592185295247,Original +1536,0.423728813559322,0.4044256120527307,-2.4634512493170466,0.13274843347385198,Original +1537,0.4,0.326271186440678,-2.8103535287436237,0.0672730683623227,Original +1538,0.5166666666666667,0.41949152542372886,-2.4681862514695623,0.09020943675168575,Original +1539,0.5666666666666667,0.423728813559322,-4.404164695679157,0.021708194051045772,Original +1540,0.5254237288135594,0.40091807909604515,-8.284302459961115,0.0036844984307791845,Original +1541,0.4406779661016949,0.3587570621468926,-3.775478418443893,0.032543731166538116,Original +1542,0.3898305084745763,0.35854519774011306,-1.7390744938729776,0.18040454692234978,Original +1543,0.423728813559322,0.35021186440677965,-3.0253320224395073,0.05651962666233991,Original +1544,0.4576271186440678,0.3923022598870057,-2.2432895898859275,0.11062363137762433,Original +1545,0.4915254237288136,0.3714689265536723,-5.879572942861632,0.009816556345478428,Original +1546,0.4745762711864407,0.3338276836158192,-3.2632296876876485,0.047016699886531045,Original +1547,0.45,0.35169491525423724,-12.115804854516572,0.0012102202443320875,Original +1548,0.4745762711864407,0.3627824858757063,-8.098378653528943,0.003934927255862458,Original +1549,0.423728813559322,0.32062146892655363,-9.054535625012225,0.002845269537083193,Original +1550,0.4666666666666667,0.3686440677966102,-6.772746506262815,0.006578095712086025,Original +1551,0.5084745762711864,0.40925141242937857,-4.746377851626824,0.01774180468795205,Original +1552,0.4666666666666667,0.3305084745762712,-5.565656594987991,0.011445072212549055,Original +1553,0.38333333333333336,0.3220338983050847,-2.557369525291349,0.08340507155260364,Original +1554,0.559322033898305,0.40557909604519776,-3.3491158311330995,0.044087787022349285,Original +1555,0.4745762711864407,0.34576271186440677,-6.516946235415337,0.007340034612930771,Original +1556,0.3898305084745763,0.28255649717514125,-2.8447438754418775,0.06539088569746056,Original +1557,0.3728813559322034,0.3204802259887006,-2.685797986127019,0.0746800724946613,Original 
+1558,0.3898305084745763,0.3375706214689266,-3.7567808109943908,0.03296266830646462,Original +1559,0.423728813559322,0.3309792843691149,-1.7858245155746162,0.2160472917948211,Original +1560,0.4915254237288136,0.4471045197740113,-3.0677087606277684,0.054660670246442895,Original +1561,0.4576271186440678,0.35918079096045197,-2.275676500715352,0.10736186186928126,Original +1562,0.5084745762711864,0.29936440677966103,-9.423964347365773,0.0025318610514215717,Original +1563,0.423728813559322,0.35847457627118645,-3.2665066947432746,0.04690049922235536,Original +1564,0.5084745762711864,0.3969632768361582,-2.896194824692244,0.06269598939057865,Original +1565,0.35,0.3177966101694915,-7.600000000000018,0.004727189949278468,Original +1566,0.4,0.364406779661017,-1.7643529058752923,0.17586309782892548,Original +1567,0.4067796610169492,0.34152542372881356,-2.7297642578116172,0.07195498560460242,Original +1568,0.3898305084745763,0.3419491525423729,-2.59993484464519,0.08038034833840463,Original +1569,0.4915254237288136,0.38834745762711864,-7.704320406910871,0.00454501569408191,Original +1570,0.3898305084745763,0.32033898305084746,-3.891547782585247,0.06013719541915213,Original +1571,0.4576271186440678,0.3543785310734463,-5.771672200284073,0.010339886184267465,Original +1572,0.4,0.3474576271186441,-4.802499349297195,0.017183746707078684,Original +1573,0.4067796610169492,0.3504237288135593,-3.262064802178429,0.04705809381582309,Original +1574,0.4067796610169492,0.34611581920903955,-2.3420605578207687,0.10103215816449211,Original +1575,0.5084745762711864,0.4430790960451977,-12.700432204420535,0.001052946749526643,Original +1576,0.423728813559322,0.36292372881355933,-5.327313464522444,0.012924898405343976,Original +1577,0.5084745762711864,0.38425141242937855,-4.698424427455757,0.018237439696403804,Original +1578,0.4576271186440678,0.3882768361581921,-6.446590136835602,0.007569780791386365,Original +1579,0.4915254237288136,0.328954802259887,-7.995948131756914,0.004082555066940009,Original +1580,0.4067796610169492,0.3249293785310734,-4.059276987931347,0.026948683537732478,Original +1581,0.4166666666666667,0.3601694915254237,-4.444444444444445,0.0211852376832342,Original +1582,0.5166666666666667,0.3135593220338983,-6.483004673258841,0.007449708002644034,Original +1583,0.4067796610169492,0.3586158192090395,-3.921613238217157,0.029493726846163477,Original +1584,0.4406779661016949,0.3038135593220339,-9.848354363498629,0.0022258218489771527,Original +1585,0.3728813559322034,0.30819209039548023,-2.8185816226035203,0.06681666397008225,Original +1586,0.423728813559322,0.3077683615819209,-4.319796363784691,0.02285889788909435,Original +1587,0.36666666666666664,0.3008474576271187,-3.7673867088977024,0.032724190890523516,Original +1588,0.4406779661016949,0.32909604519774016,-5.006421579143373,0.015338372293344877,Original +1589,0.26666666666666666,0.1822033898305085,-10.409843826150722,0.001891887685589342,Original +1590,0.3728813559322034,0.32895480225988705,-2.949093002418226,0.060068491192589654,Original +1591,0.4576271186440678,0.4213747645951036,-3.60190807688241,0.06917625132966015,Original +1592,0.4067796610169492,0.3712570621468927,-2.1556241033270016,0.12006957075894573,Original +1593,0.3728813559322034,0.3081214689265537,-3.8998685439923104,0.029923600161968773,Original +1594,0.4067796610169492,0.3206214689265537,-3.0461946335774264,0.05559466924951487,Original +1595,0.5084745762711864,0.3803672316384181,-2.5550917784808758,0.08357084637244536,Original 
+1596,0.3898305084745763,0.3311676082862524,-2.2490974729241855,0.1534474328108915,Original +1597,0.4576271186440678,0.3293079096045198,-8.10248857720817,0.003929152121721815,Original +1598,0.36666666666666664,0.3432203389830508,-5.53333333333333,0.011632297103592594,Original +1599,0.4915254237288136,0.401271186440678,-2.459129645217466,0.09093785730195074,Original +1600,0.5423728813559322,0.4557909604519774,-3.177872547530921,0.05017596813649382,Original +1601,0.23728813559322035,0.19837570621468925,-4.553719008264464,0.01984639027416479,Original +1602,0.4745762711864407,0.39279661016949147,-2.2752115860184423,0.10740785666680364,Original +1603,0.4745762711864407,0.3667372881355932,-2.5590817041221827,0.08328072516772207,Original +1604,0.3728813559322034,0.307909604519774,-4.795831523312723,0.017248843874307026,Original +1605,0.4406779661016949,0.39646892655367233,-2.0743037811227554,0.12970694368573776,Original +1606,0.3728813559322034,0.29199623352165727,-4.218647880820993,0.05185732044957473,Original +1607,0.3728813559322034,0.307909604519774,-3.8877095717511785,0.030167484179746327,Original +1608,0.43333333333333335,0.3389830508474576,-3.1281966106828594,0.05213847018410804,Original +1609,0.423728813559322,0.2911016949152542,-3.8194407004959707,0.03158527352615677,Original +1610,0.36666666666666664,0.2796610169491525,-3.422222222222223,0.041774946157591517,Original +1611,0.35,0.3008474576271186,-3.396132253860897,0.042582247398141884,Original +1612,0.5254237288135594,0.4008474576271187,-5.126174517160044,0.01437396472653316,Original +1613,0.4745762711864407,0.4093926553672317,-3.8752447538675496,0.030420159550251232,Original +1614,0.4166666666666667,0.3220338983050848,-2.456339176607667,0.0911637428376164,Original +1615,0.4067796610169492,0.28721751412429375,-3.7810435820759127,0.03242035778750225,Original +1616,0.4067796610169492,0.3291666666666667,-4.334673395356118,0.022650355990863152,Original +1617,0.4745762711864407,0.2828389830508475,-10.598274792221071,0.0017948089853278408,Original +1618,0.4166666666666667,0.3050847457627119,-2.1744034361245617,0.1179670753099225,Original +1619,0.6271186440677966,0.4050847457627119,-9.240027567747635,0.0026818554468391116,Original +1620,0.4406779661016949,0.3543785310734463,-7.613153050047849,0.004703696031017581,Original +1621,0.45,0.3940677966101695,-2.976518270891203,0.05876051507230344,Original +1622,0.4745762711864407,0.38411016949152543,-7.506646894907283,0.0048984693502791745,Original +1623,0.4576271186440678,0.39668079096045195,-3.674203780886493,0.034897733800121254,Original +1624,0.4406779661016949,0.3627824858757062,-5.044529690751584,0.015022564694235147,Original +1625,0.4576271186440678,0.32083333333333336,-5.361374140743174,0.012698680109057153,Original +1626,0.4915254237288136,0.371045197740113,-5.171216742476558,0.014031731053501115,Original +1627,0.38333333333333336,0.3347457627118644,-3.822222222222226,0.03152585651673634,Original +1628,0.3389830508474576,0.3038135593220339,-27.666666666666476,0.00010364812386939461,Original +1629,0.4406779661016949,0.3425612052730697,-7.542807340088425,0.01712633052558875,Original +1630,0.423728813559322,0.2867937853107344,-3.780359197747885,0.032435497445757994,Original +1631,0.423728813559322,0.35430790960451974,-2.986076879787708,0.058313047921487345,Original +1632,0.3220338983050847,0.2911016949152543,-2.5053535141492107,0.08729389266327836,Original +1633,0.5084745762711864,0.38389830508474576,-3.5352476162925313,0.03849432965830513,Original 
+1634,0.4745762711864407,0.3296610169491525,-2.932194632545476,0.060892602743676055,Original +1635,0.3898305084745763,0.3145951035781544,-5.112873614515615,0.036189580961726336,Original +1636,0.4067796610169492,0.388135593220339,-3.316624790355395,0.04516783583455813,Original +1637,0.2711864406779661,0.24039548022598872,-2.2432542767385697,0.1106272525728738,Original +1638,0.4067796610169492,0.2913135593220339,-4.63515148563193,0.01891930032646319,Original +1639,0.423728813559322,0.31645480225988704,-7.447420720142193,0.005011398574053057,Original +1640,0.5,0.4025423728813559,-4.372697328618899,0.02212841807396774,Original +1641,0.4406779661016949,0.36525423728813555,-9.888888888888882,0.010071751888174055,Original +1642,0.4067796610169492,0.31631355932203387,-4.212334695138946,0.024441477588059136,Original +1643,0.4745762711864407,0.3838983050847458,-13.55263062255603,0.0008688656783751102,Original +1644,0.4915254237288136,0.29583333333333334,-3.9331245586533203,0.029269369284174996,Original +1645,0.4067796610169492,0.3418079096045198,-5.938574464184704,0.00954502881900154,Original +1646,0.5423728813559322,0.42189265536723164,-2.446859704851505,0.09193623834826871,Original +1647,0.4406779661016949,0.31242937853107344,-7.44762841439633,0.005010996583977287,Original +1648,0.423728813559322,0.35021186440677965,-6.955318569856883,0.006096907162548966,Original +1649,0.4576271186440678,0.3627824858757063,-6.8705764571632155,0.006314278455610205,Original +1650,0.3898305084745763,0.3165254237288136,-8.288366308708238,0.0036792632033313915,Original +1651,0.4067796610169492,0.31666666666666665,-4.566003911847601,0.019702811079520054,Original +1652,0.4166666666666667,0.364406779661017,-2.9623708453941098,0.05943073144700672,Original +1653,0.4067796610169492,0.3332627118644068,-2.9486299139697687,0.06009088857589412,Original +1654,0.3898305084745763,0.32040960451977407,-1.8464888902941,0.16198897032955312,Original +1655,0.423728813559322,0.341454802259887,-2.2689902462968896,0.10802564058574593,Original +1656,0.4406779661016949,0.31242937853107344,-5.102734357993427,0.01455637683815113,Original +1657,0.35,0.2754237288135593,-2.66340767777208,0.07611699066362566,Original +1658,0.4406779661016949,0.35035310734463276,-4.624808617471442,0.019033890844563105,Original +1659,0.4406779661016949,0.37549435028248584,-2.153106812065672,0.12035479878967444,Original +1660,0.2711864406779661,0.24708097928436912,-2.461538461538462,0.13291549663455224,Original +1661,0.3389830508474576,0.30798022598870056,-2.5144900114567874,0.08659496027337239,Original +1662,0.3898305084745763,0.26151129943502827,-8.20761557188874,0.0037851627982459913,Original +1663,0.4,0.3177966101694915,-3.377106525540539,0.043183492567890926,Original +1664,0.5084745762711864,0.39216101694915256,-4.374860785929049,0.02209919137309572,Original +1665,0.48333333333333334,0.3940677966101695,-4.0051372633147,0.027914510109473,Original +1666,0.423728813559322,0.30798022598870056,-5.52252684001195,0.011695784203535333,Original +1667,0.4576271186440678,0.41334745762711866,-3.0332358222638836,0.05616694738868672,Original +1668,0.4406779661016949,0.33326271186440676,-3.2380690490156394,0.04792113259547419,Original +1669,0.4067796610169492,0.3247175141242938,-3.864669010850272,0.030636676156145785,Original +1670,0.423728813559322,0.3754943502824859,-18.459459459459406,0.0003469320912607935,Original +1671,0.4067796610169492,0.35444915254237286,-7.394856051762754,0.005114520056169129,Original 
+1672,0.423728813559322,0.38389830508474576,-2.591193878173862,0.08099031623314684,Original +1673,0.423728813559322,0.3372175141242938,-2.2461944693225684,0.11032623843400814,Original +1674,0.4576271186440678,0.35444915254237286,-69.57142857142838,6.544177344803568e-06,Original +1675,0.4,0.3347457627118644,-2.262423733759691,0.10868236109185891,Original +1676,0.43333333333333335,0.3305084745762712,-2.886707772014044,0.06318231943174393,Original +1677,0.4745762711864407,0.39661016949152544,-5.396227020339848,0.012472526064506972,Original +1678,0.4067796610169492,0.3483050847457627,-11.775818566563489,0.00713429023847155,Original +1679,0.4406779661016949,0.3541666666666667,-3.8771841553769035,0.030380667708462643,Original +1680,0.423728813559322,0.3629237288135593,-4.044393553582212,0.02720985020655579,Original +1681,0.3728813559322034,0.30798022598870056,-2.584061622196982,0.08149225560483021,Original +1682,0.4745762711864407,0.3711158192090395,-5.036029713302375,0.015092265651518118,Original +1683,0.4745762711864407,0.38813559322033897,-3.0606121836612235,0.05496655926720403,Original +1684,0.5084745762711864,0.3965395480225989,-4.332720166230066,0.022677595074616603,Original +1685,0.4406779661016949,0.3933145009416196,-6.776908139806704,0.021087634455750442,Original +1686,0.4915254237288136,0.40896892655367234,-2.7252378965546162,0.07222977633186194,Original +1687,0.4576271186440678,0.35854519774011295,-7.385335801707823,0.005133495507004227,Original +1688,0.4406779661016949,0.3754943502824859,-5.31399624894294,0.013014777489807249,Original +1689,0.4067796610169492,0.32881355932203393,-3.1543372140892108,0.051093920277185956,Original +1690,0.4406779661016949,0.3628531073446328,-5.382081108767228,0.012563676575340528,Original +1691,0.43333333333333335,0.34745762711864403,-7.849246248313698,0.004307003389198978,Original +1692,0.4067796610169492,0.3163841807909604,-2.724020898427997,0.07230388107794879,Original +1693,0.3898305084745763,0.3038135593220339,-4.135972456516446,0.025652892626118445,Original +1694,0.3559322033898305,0.2786016949152542,-6.564422610165478,0.007190161439169192,Original +1695,0.45,0.36864406779661013,-3.3422909943494004,0.04431191289649832,Original +1696,0.5,0.364406779661017,-4.131182235954578,0.02573144168204107,Original +1697,0.5,0.3813559322033898,-2.919201796799047,0.06153589112407167,Original +1698,0.5084745762711864,0.35444915254237286,-4.447863008870363,0.021141608672485282,Original +1699,0.559322033898305,0.34611581920903955,-7.952423126063747,0.004147501211922912,Original +1700,0.423728813559322,0.354590395480226,-4.195847013586463,0.024696683385781026,Original +1701,0.5084745762711864,0.4514124293785311,-2.7684223456685544,0.06966019639221771,Original +1702,0.4666666666666667,0.38983050847457623,-3.3480885546378523,0.04412143039369844,Original +1703,0.3898305084745763,0.3332627118644068,-3.6621184863223024,0.03519301414612658,Original +1704,0.423728813559322,0.33742937853107347,-4.962234528821576,0.015715475861098616,Original +1705,0.4406779661016949,0.40098870056497177,-3.2752006464574146,0.04659397436436988,Original +1706,0.35,0.2923728813559322,-3.2984845004941277,0.04578545695456099,Original +1707,0.3333333333333333,0.2754237288135593,-3.609848715935058,0.03650753702324681,Original +1708,0.4666666666666667,0.3813559322033898,-10.066666666666663,0.0020873570938939115,Original +1709,0.4,0.3220338983050848,-5.039047529047529,0.01506747099332577,Original +1710,0.4,0.3432203389830508,-3.9231182932531077,0.029464267753910946,Original 
+1711,0.55,0.43644067796610164,-2.3596084289526735,0.09943521206177618,Original +1712,0.3559322033898305,0.3360169491525423,-1.4242424242424274,0.38970838325942464,Original +1713,0.5423728813559322,0.4132768361581921,-5.184819470451837,0.013930470217760019,Original +1714,0.4745762711864407,0.4091101694915254,-3.301274514960128,0.04568977195819744,Original +1715,0.4576271186440678,0.2870762711864407,-8.442559461787118,0.003487683310568225,Original +1716,0.4745762711864407,0.40508474576271186,-4.984211525609615,0.015526427125038359,Original +1717,0.4406779661016949,0.40084745762711865,-9.945373183606224,0.00216282978388815,Original +1718,0.4745762711864407,0.3882062146892655,-4.699364429896171,0.018227552356258447,Original +1719,0.3898305084745763,0.28700564971751413,-4.0757619541037435,0.02666317079369889,Original +1720,0.4067796610169492,0.358545197740113,-5.255556820568775,0.01341898591418548,Original +1721,0.5084745762711864,0.34187853107344635,-4.739270182928911,0.017814153415116545,Original +1722,0.4745762711864407,0.36299435028248583,-4.303404214552464,0.02309156259614923,Original +1723,0.4406779661016949,0.4049435028248588,-2.885264224008987,0.06325673205698319,Original +1724,0.3389830508474576,0.2825564971751412,-4.239397969490223,0.024029911324908407,Original +1725,0.3389830508474576,0.2996468926553672,-2.9888839714804116,0.058182452145839086,Original +1726,0.3898305084745763,0.2979284369114878,-5.611049007782942,0.030324925803336726,Original +1727,0.3728813559322034,0.3248587570621469,-6.42539604115687,0.0076408389421474725,Original +1728,0.4576271186440678,0.3161016949152543,-4.91247301430133,0.016154741914472536,Original +1729,0.4576271186440678,0.39668079096045195,-4.042742188321896,0.02723902806724627,Original +1730,0.3898305084745763,0.25748587570621473,-2.8937262538718946,0.06282208280557401,Original +1731,0.4067796610169492,0.30409604519774014,-3.6040922833026183,0.036656113662429206,Original +1732,0.4915254237288136,0.4051553672316384,-6.7314755578400405,0.006693743268842144,Original +1733,0.45,0.326271186440678,-8.548884639029154,0.00336320130822139,Original +1734,0.4067796610169492,0.2996468926553672,-5.609714558498771,0.011196176065376142,Original +1735,0.4406779661016949,0.37542372881355934,-3.3908313960680885,0.04274868888333008,Original +1736,0.35,0.3093220338983051,-3.1999999999999997,0.04933184296269623,Original +1737,0.4745762711864407,0.38201506591337103,-6.299599690538863,0.024284334261122562,Original +1738,0.4915254237288136,0.35021186440677965,-13.369445204883407,0.0009045905316411595,Original +1739,0.4,0.2923728813559322,-5.374733592842522,0.012611364632413758,Original +1740,0.4915254237288136,0.4219632768361582,-39.40000000000022,3.59729328035016e-05,Original +1741,0.559322033898305,0.3708333333333333,-4.916019118203146,0.016122913876139212,Original +1742,0.45,0.3008474576271187,-13.987061690606863,0.0007913286284483068,Original +1743,0.43333333333333335,0.3177966101694915,-4.258337907495993,0.023747193827929766,Original +1744,0.3389830508474576,0.2572033898305085,-4.033986241649274,0.027394411372010617,Original +1745,0.4745762711864407,0.3540489642184557,-3.3684210526315805,0.07796676388459127,Original +1746,0.4745762711864407,0.38813559322033897,-4.245089541976653,0.02394449803344903,Original +1747,0.4067796610169492,0.3204802259887006,-5.112836449747386,0.014477393996814832,Original +1748,0.5333333333333333,0.41949152542372886,-6.516123792642811,0.007342666934462457,Original 
+1749,0.4576271186440678,0.4093220338983051,-4.384615384615384,0.02196803265379433,Original +1750,0.4576271186440678,0.32902542372881355,-4.153671286484361,0.025365339369804084,Original +1751,0.4406779661016949,0.367090395480226,-3.028199837684228,0.05639133971192534,Original +1752,0.4576271186440678,0.3800847457627119,-2.6499484677906273,0.07699722400216412,Original +1753,0.4745762711864407,0.3713983050847458,-3.082793063741217,0.054017563157762766,Original +1754,0.3389830508474576,0.2866290018832392,-4.709835325623289,0.04224443000728903,Original +1755,0.3559322033898305,0.2532015065913371,-2.2513229059561555,0.15321033928811528,Original +1756,0.4406779661016949,0.3763653483992467,-3.2352232779643906,0.0837182578425499,Original +1757,0.3728813559322034,0.33742937853107347,-2.4658853737705697,0.09039381717094448,Original +1758,0.4406779661016949,0.35847457627118645,-5.143398239932954,0.014241825510816557,Original +1759,0.45,0.3813559322033898,-4.676537180435968,0.018469641646674063,Original +1760,0.423728813559322,0.363135593220339,-2.293068697718329,0.10565819843234399,Original +1761,0.4406779661016949,0.38375706214689265,-2.3710395141336553,0.09841141140216388,Original +1762,0.5254237288135594,0.42664783427495295,-3.434253416143983,0.07533225481738264,Original +1763,0.2542372881355932,0.2189265536723164,-2.272727272727272,0.15095548648584448,Original +1764,0.43333333333333335,0.3940677966101695,-1.6131196118561335,0.20511808054337077,Original +1765,0.5254237288135594,0.4224576271186441,-2.122911802712327,0.1238400486795913,Original +1766,0.48333333333333334,0.3983050847457627,-3.4756486205215467,0.04018159901970025,Original +1767,0.48333333333333334,0.4067796610169492,-3.4985949560740326,0.03952103415356299,Original +1768,0.3559322033898305,0.23634651600753295,-3.432432432432432,0.07540339116695642,Original +1769,0.4576271186440678,0.3125,-3.6292186583582198,0.03601320371125468,Original +1770,0.4406779661016949,0.38757062146892657,-6.714285714285707,0.021470189769271106,Original +1771,0.4166666666666667,0.30508474576271183,-5.099428072506434,0.01458234892459302,Original +1772,0.38333333333333336,0.3432203389830508,-4.943805429141822,0.015876322370823124,Original +1773,0.423728813559322,0.28255649717514125,-7.881999450070924,0.004255505421232205,Original +1774,0.4915254237288136,0.40494350282485875,-5.023746418203985,0.015193736814334122,Original +1775,0.559322033898305,0.4595338983050848,-2.392754433686525,0.09650164759323027,Original +1776,0.4745762711864407,0.35459039548022603,-5.888771927947271,0.009773561677083785,Original +1777,0.4406779661016949,0.32478813559322034,-12.454354190804333,0.0011156250386489563,Original +1778,0.5084745762711864,0.3755649717514124,-3.3684353047674622,0.04346109463220811,Original +1779,0.423728813559322,0.3672316384180791,-4.082482904638627,0.026547885467199526,Original +1780,0.4576271186440678,0.35402542372881357,-2.5536843901385566,0.08367347896165804,Original +1781,0.4745762711864407,0.4175141242937853,-1.946993604286342,0.1467043534415175,Original +1782,0.4576271186440678,0.37966101694915255,-3.993713653195303,0.028123969733242882,Original +1783,0.4576271186440678,0.354590395480226,-2.8307905599569243,0.06614653353035789,Original +1784,0.3898305084745763,0.32090395480225986,-3.177686792530804,0.05018313128134387,Original +1785,0.5084745762711864,0.4304378531073446,-6.772008114046649,0.006580141599024013,Original +1786,0.4067796610169492,0.3839689265536724,-5.701874626813004,0.010698008944756929,Original 
+1787,0.5254237288135594,0.3542372881355932,-5.854047900359961,0.009937162378535997,Original +1788,0.5084745762711864,0.3412429378531074,-3.8779880896871726,0.030364316576825364,Original +1789,0.5084745762711864,0.42196327683615825,-3.458948005905017,0.04067122554425211,Original +1790,0.4166666666666667,0.3135593220338983,-1.8272855362744203,0.16511600726806364,Original +1791,0.4406779661016949,0.3377118644067797,-6.269358747755155,0.00819196116993917,Original +1792,0.5932203389830508,0.40939265536723163,-13.436639703627074,0.0008912634414880094,Original +1793,0.3898305084745763,0.2910310734463277,-4.146372958429369,0.025483409105270218,Original +1794,0.3728813559322034,0.3313559322033898,-1.9155635193164227,0.19549520636629938,Original +1795,0.4745762711864407,0.38728813559322034,-2.7837837837837838,0.10845063612547547,Original +1796,0.4915254237288136,0.321045197740113,-5.483588555093928,0.011928337117442485,Original +1797,0.4406779661016949,0.3612994350282486,-14.789473684210558,0.042980044870241256,Original +1798,0.5084745762711864,0.3962570621468926,-3.830072253349244,0.0313589410287926,Original +1799,0.4406779661016949,0.3246468926553672,-3.659896369736786,0.035247654519723955,Original +1800,0.3389830508474576,0.2919962335216572,-3.5407885315000662,0.07133333882574665,Original +1801,0.423728813559322,0.3584039548022599,-3.1685329501250483,0.050537730061605154,Original +1802,0.4067796610169492,0.32076271186440675,-6.808401479691212,0.006480293784839257,Original +1803,0.3898305084745763,0.34625706214689267,-2.0407541348149305,0.13394942774558222,Original +1804,0.4666666666666667,0.3516949152542373,-5.158515399269249,0.014127150588176329,Original +1805,0.4745762711864407,0.37146892655367225,-5.367066741284801,0.012661377799601295,Original +1806,0.4915254237288136,0.3967514124293785,-2.4964946840644773,0.08797820274589993,Original +1807,0.5333333333333333,0.4745762711864407,-2.3551362310207575,0.09983927564004276,Original +1808,0.4745762711864407,0.4009180790960452,-8.122451922976826,0.003901259023001135,Original +1809,0.4745762711864407,0.3456920903954802,-4.094924639556887,0.02633616071520719,Original +1810,0.4745762711864407,0.3463276836158192,-3.5851541120965997,0.03715039979658868,Original +1811,0.3898305084745763,0.3125,-2.848001248439178,0.06521603391989962,Original +1812,0.4067796610169492,0.2955508474576271,-5.314610961015995,0.013010610801463965,Original +1813,0.4915254237288136,0.3711864406779661,-6.051234040593998,0.009053328678904286,Original +1814,0.3559322033898305,0.2994350282485876,-2.6261286571944504,0.07858610153137385,Original +1815,0.4166666666666667,0.35593220338983056,-3.925344995453686,0.02942075287920854,Original +1816,0.5254237288135594,0.43481638418079094,-4.495559790237215,0.02054484962621015,Original +1817,0.3898305084745763,0.3416666666666667,-5.211728279194447,0.01373294914761014,Original +1818,0.5254237288135594,0.4387711864406779,-3.071254985149466,0.05450861721232613,Original +1819,0.4915254237288136,0.3923022598870056,-2.724050147464563,0.07230209895638379,Original +1820,0.576271186440678,0.40494350282485875,-6.15102387874345,0.008645202687168257,Original +1821,0.4067796610169492,0.3374293785310734,-2.6487965059751333,0.07707314575519472,Original +1822,0.423728813559322,0.2919962335216572,-9.926980271680543,0.009995759216947474,Original +1823,0.4745762711864407,0.3204802259887006,-6.442102133047244,0.007584755136376526,Original +1824,0.5,0.4279661016949153,-4.9770903720375195,0.015587358121496146,Original 
+1825,0.5333333333333333,0.3432203389830508,-10.881765043296808,0.0016608270081661155,Original +1826,0.4576271186440678,0.38799435028248586,-1.7579248960751477,0.17700539897601802,Original +1827,0.4915254237288136,0.36271186440677966,-3.4834542887072035,0.03995532706040686,Original +1828,0.4745762711864407,0.40932203389830507,-12.89316271749542,0.0010070852904372337,Original +1829,0.423728813559322,0.3586158192090395,-5.301653087443131,0.013098812973106142,Original +1830,0.45,0.41101694915254233,-2.6934842013379563,0.07419455966723891,Original +1831,0.423728813559322,0.36694915254237287,-2.416081794014973,0.0945001250957143,Original +1832,0.4576271186440678,0.35028248587570626,-4.358898943540671,0.02231600572520053,Original +1833,0.4067796610169492,0.35882768361581924,-2.596202410283425,0.08064011223189176,Original +1834,0.4406779661016949,0.3968220338983051,-2.5012480459007254,0.08761020945997577,Original +1835,0.4745762711864407,0.40918079096045196,-3.6547443425101043,0.03537475739142397,Original +1836,0.5084745762711864,0.4093220338983051,-6.728880964155069,0.006701102825924804,Original +1837,0.4406779661016949,0.3880649717514124,-3.6962350907584107,0.03436754879976233,Original +1838,0.43333333333333335,0.3135593220338983,-3.0363270659271957,0.056029764059456005,Original +1839,0.5254237288135594,0.3624293785310735,-4.007620942190811,0.02786923613375584,Original +1840,0.3898305084745763,0.3442090395480226,-1.1660649819494582,0.4512877748622783,Original +1841,0.4576271186440678,0.36299435028248583,-5.564047150200577,0.011454300879796516,Original +1842,0.45,0.35593220338983045,-4.53155602414888,0.020108869526374775,Original +1843,0.5,0.3728813559322034,-3.2995600879804483,0.04574853906846063,Original +1844,0.4745762711864407,0.3801553672316384,-2.6218256817858103,0.0788774316850411,Original +1845,0.4067796610169492,0.34625706214689267,-2.2952786442989725,0.10544407020716878,Original +1846,0.4576271186440678,0.34194915254237285,-7.414573731136393,0.005075512686812491,Original +1847,0.4406779661016949,0.29540960451977405,-4.994116161502297,0.015442194581693753,Original +1848,0.3389830508474576,0.2660310734463277,-4.308319914295902,0.02302147063382807,Original +1849,0.5254237288135594,0.392090395480226,-3.99713035331454,0.028061110834153927,Original +1850,0.4166666666666667,0.34745762711864403,-3.430686205868627,0.04151723186686211,Original +1851,0.3898305084745763,0.35854519774011295,-2.331934255279095,0.10196787590396043,Original +1852,0.4576271186440678,0.2998587570621469,-5.194000650735758,0.013862662574053051,Original +1853,0.4576271186440678,0.2990819209039548,-3.51802668500397,0.038972429516855533,Original +1854,0.4067796610169492,0.34173728813559323,-2.5913248074535873,0.08098113762773655,Original +1855,0.43333333333333335,0.3347457627118644,-5.6429955425120175,0.011012841275964533,Original +1856,0.4915254237288136,0.3794491525423729,-3.26894241254243,0.04681436566609524,Original +1857,0.5423728813559322,0.40084745762711865,-3.985615868631576,0.028273674351328677,Original +1858,0.4406779661016949,0.34597457627118644,-6.54645011829731,0.007246419253764158,Original +1859,0.423728813559322,0.2955508474576271,-3.1245537578699243,0.05228616491106145,Original +1860,0.423728813559322,0.2823446327683616,-3.3366481297841037,0.04449831598300532,Original +1861,0.4666666666666667,0.3898305084745763,-4.966017854713506,0.015682718436889013,Original +1862,0.4915254237288136,0.3248587570621469,-3.666081619407081,0.03509583190764937,Original 
+1863,0.4406779661016949,0.35466101694915253,-3.9441648534374134,0.029056249071256115,Original +1864,0.43333333333333335,0.36864406779661013,-1.786828180527595,0.17193506795662192,Original +1865,0.6440677966101694,0.4304378531073446,-11.862745098039216,0.001287998117223525,Original +1866,0.576271186440678,0.43036723163841806,-3.707083667393408,0.034110263406932406,Original +1867,0.5084745762711864,0.379590395480226,-3.550388805186171,0.03808013155197999,Original +1868,0.35,0.2838983050847458,-5.199999999999993,0.013818586905132142,Original +1869,0.4576271186440678,0.3840395480225988,-3.832436694700159,0.03130888812484261,Original +1870,0.4406779661016949,0.3586864406779661,-3.3503877475316646,0.04404617677686765,Original +1871,0.5423728813559322,0.39244350282485874,-9.21746279562317,0.0027010591887466347,Original +1872,0.4406779661016949,0.3964689265536723,-2.3349757965803564,0.10168572245232489,Original +1873,0.4745762711864407,0.40911016949152545,-2.509542503119258,0.08697258350325465,Original +1874,0.423728813559322,0.3287429378531073,-2.9086605723710677,0.06206405682692753,Original +1875,0.4745762711864407,0.3425141242937853,-2.187920293054828,0.11648102879543593,Original +1876,0.4406779661016949,0.36694915254237287,-2.2961086998148015,0.10536377934120532,Original +1877,0.45,0.4152542372881356,-3.175846343890085,0.05025417308948302,Original +1878,0.3559322033898305,0.29971751412429376,-3.099929158257806,0.05329849219831802,Original +1879,0.4666666666666667,0.3305084745762712,-6.072629199681584,0.008963729862100802,Original +1880,0.3898305084745763,0.31101694915254235,-7.153846153846161,0.08841694696574594,Original +1881,0.4067796610169492,0.3038135593220339,-7.40913346851445,0.005086235812941151,Original +1882,0.4666666666666667,0.4152542372881356,-2.2929844695893116,0.10566636994498525,Original +1883,0.5423728813559322,0.37521186440677967,-4.801612169541382,0.017192389686773397,Original +1884,0.4406779661016949,0.33785310734463275,-3.9583928774422366,0.028784526660768115,Original +1885,0.4745762711864407,0.3879237288135593,-2.7149672381577274,0.07285814285484306,Original +1886,0.5833333333333334,0.38983050847457623,-7.2205339907178026,0.005477206760423499,Original +1887,0.4166666666666667,0.3686440677966102,-5.918640302493723,0.009635652367993155,Original +1888,0.423728813559322,0.3987758945386064,-1.8820851842053508,0.20054056943775883,Original +1889,0.4915254237288136,0.43015536723163844,-2.973639372537061,0.05889612816583925,Original +1890,0.5254237288135594,0.33361581920903954,-4.658888265058127,0.018659682503791214,Original +1891,0.4915254237288136,0.37097457627118646,-3.2449363634867825,0.04767210916456234,Original +1892,0.43333333333333335,0.36016949152542377,-2.612964350541548,0.07948159398225181,Original +1893,0.4576271186440678,0.32937853107344633,-4.435658555321488,0.021297902760540714,Original +1894,0.423728813559322,0.3543785310734463,-7.723907549595242,0.004511849154258445,Original +1895,0.4915254237288136,0.41765536723163843,-6.264933372064462,0.008208341834463534,Original +1896,0.4406779661016949,0.3501412429378531,-4.144463927530725,0.02551440964648421,Original +1897,0.423728813559322,0.30798022598870056,-8.186715143005392,0.003813227168001511,Original +1898,0.4406779661016949,0.3123587570621469,-4.795397246883075,0.017253094776815005,Original +1899,0.5333333333333333,0.40254237288135597,-3.012278225167804,0.0571082206033358,Original +1900,0.4,0.3008474576271186,-3.099405715532204,0.053320277974167773,Original 
+1901,0.4067796610169492,0.29562146892655367,-3.85816763155035,0.030770761179515826,Original +1902,0.4745762711864407,0.3714689265536723,-4.782389009187055,0.017381059516950674,Original +1903,0.4166666666666667,0.3220338983050848,-3.7931280643843954,0.03215451606094398,Original +1904,0.4406779661016949,0.40084745762711865,-2.4921620337819483,0.08831527775786965,Original +1905,0.4915254237288136,0.4009180790960452,-9.991472499692488,0.0021337257653321886,Original +1906,0.36666666666666664,0.3220338983050848,-4.561067126598043,0.019760347058840888,Original +1907,0.5254237288135594,0.4262005649717514,-6.726801374615161,0.006707009265801296,Original +1908,0.4745762711864407,0.34611581920903955,-7.571218430433462,0.004779142305515517,Original +1909,0.3898305084745763,0.286864406779661,-11.301306940901064,0.0014858566277749805,Original +1910,0.4576271186440678,0.32076271186440675,-8.564496343017598,0.0033454209602583485,Original +1911,0.4745762711864407,0.3628531073446328,-4.591316366529474,0.019411195178700307,Original +1912,0.4576271186440678,0.3208333333333333,-10.424077229245048,0.0018843150010148248,Original +1913,0.45,0.30508474576271183,-3.4430305632479454,0.04114498062149175,Original +1914,0.4067796610169492,0.36709039548022593,-9.92864680939292,0.0021735197350935946,Original +1915,0.5333333333333333,0.3305084745762712,-41.4537493278151,3.0893720674523154e-05,Original +1916,0.5423728813559322,0.33785310734463275,-7.8732869320554375,0.004269124399355101,Original +1917,0.5932203389830508,0.38834745762711864,-10.638370665053376,0.0017750090525035277,Original +1918,0.4,0.3389830508474576,-3.9436024140371986,0.029067057994347754,Original +1919,0.4406779661016949,0.3209039548022599,-5.521881967348604,0.011699587084326106,Original +1920,0.5084745762711864,0.29964689265536726,-5.367242638173358,0.01266022746944358,Original +1921,0.4576271186440678,0.3714689265536723,-5.21158171207224,0.013734015073651634,Original +1922,0.4406779661016949,0.35035310734463276,-4.9456020241512695,0.015860547909964277,Original +1923,0.3728813559322034,0.2750470809792844,-4.254446720326273,0.05105369953469059,Original +1924,0.4067796610169492,0.34597457627118644,-14.428366472211776,0.0007217047864811893,Original +1925,0.5084745762711864,0.3921610169491525,-5.123685756910068,0.014393190365221242,Original +1926,0.4406779661016949,0.3547316384180791,-2.6999881415740097,0.07378677879364155,Original +1927,0.4576271186440678,0.31221751412429377,-4.544899514360726,0.01995030538814641,Original +1928,0.4406779661016949,0.34246704331450095,-4.8064516129032215,0.04066429179543451,Original +1929,0.559322033898305,0.3800847457627119,-3.055206987855407,0.05520098921136872,Original +1930,0.35,0.2754237288135593,-2.66340767777208,0.07611699066362566,Original +1931,0.5254237288135594,0.4429378531073446,-6.248223190659335,0.008270584465932308,Original +1932,0.3898305084745763,0.2992231638418079,-3.1375946196106543,0.05175986194673748,Original +1933,0.4576271186440678,0.33771186440677964,-7.301351957262182,0.005304981348263639,Original +1934,0.4067796610169492,0.3080508474576271,-3.1525851210756604,0.051163101927519376,Original +1935,0.3389830508474576,0.29124293785310734,-3.5699799446212253,0.03755260022285919,Original +1936,0.423728813559322,0.38001412429378534,-2.034149230295387,0.13480390529326824,Original +1937,0.3898305084745763,0.3374293785310734,-2.3577069273967326,0.09960676963753387,Original +1938,0.4576271186440678,0.3080508474576271,-17.7463880302814,0.0003901203293975093,Original 
+1939,0.4576271186440678,0.3704331450094162,-1.7365199955826325,0.22460571149425088,Original +1940,0.4406779661016949,0.328954802259887,-5.202033883431729,0.013803686006307778,Original +1941,0.4745762711864407,0.3628531073446327,-3.2057938752604676,0.04911378606623852,Original +1942,0.4745762711864407,0.35444915254237286,-3.7842912965837407,0.032348636596183485,Original +1943,0.5932203389830508,0.30007062146892655,-5.840104693319583,0.010003865123379863,Original +1944,0.4745762711864407,0.3880414312617702,-2.3384829558815743,0.14430805754256285,Original +1945,0.4067796610169492,0.37071563088512244,-2.2572802622847377,0.15257812860009795,Original +1946,0.3898305084745763,0.358545197740113,-2.3319342552790907,0.10196787590396081,Original +1947,0.423728813559322,0.3627824858757062,-1.9704993917573732,0.1433770749616963,Original +1948,0.4666666666666667,0.3813559322033898,-2.4415252920324195,0.09237446595146682,Original +1949,0.4915254237288136,0.3713276836158192,-6.5420904956073125,0.007260153005952,Original +1950,0.4406779661016949,0.3334745762711865,-3.470097292282929,0.04034351536878077,Original +1951,0.4745762711864407,0.3879237288135593,-2.1390171916328864,0.12196630264944257,Original +1952,0.48333333333333334,0.40254237288135597,-2.8010960513215193,0.06779122416384437,Original +1953,0.423728813559322,0.379590395480226,-2.325606550774779,0.10255792463406842,Original +1954,0.4915254237288136,0.40433145009416194,-2.662987333476817,0.11681579707766619,Original +1955,0.4915254237288136,0.3373587570621469,-5.418875755811864,0.012328378409982957,Original +1956,0.4406779661016949,0.32902542372881355,-5.097322089843262,0.014598923701575601,Original +1957,0.4745762711864407,0.38375706214689265,-2.795942882520684,0.06808180891277789,Original +1958,0.4406779661016949,0.3250706214689265,-4.171340373881652,0.02508240388592301,Original +1959,0.4915254237288136,0.3246468926553673,-6.228587468296307,0.008344518944513782,Original +1960,0.5,0.2838983050847458,-8.87796045374059,0.0030132990718159855,Original +1961,0.3898305084745763,0.2824858757062147,-4.417410272265133,0.021534399027710104,Original +1962,0.5084745762711864,0.39279661016949147,-3.218304471414686,0.04864708009336623,Original +1963,0.4406779661016949,0.3543785310734463,-3.815695684869742,0.03166549916342224,Original +1964,0.4406779661016949,0.31228813559322033,-11.326278432886252,0.0014762298056976652,Original +1965,0.4576271186440678,0.4092514124293785,-3.953202519249618,0.028883270408863487,Original +1966,0.423728813559322,0.36264124293785305,-2.925487770616238,0.06122360955751544,Original +1967,0.4576271186440678,0.359322033898305,-1.8552679320224146,0.20470801229945912,Original +1968,0.5254237288135594,0.3965395480225989,-9.383684565305357,0.002563738947888246,Original +1969,0.45,0.3474576271186441,-5.812651983124662,0.010136920657992399,Original +1970,0.4067796610169492,0.3081214689265537,-3.056600083912935,0.05514044883654204,Original +1971,0.4406779661016949,0.3456920903954802,-2.7626457956106245,0.06999725904230607,Original +1972,0.4067796610169492,0.3418079096045198,-4.4263520637871325,0.021418091531717674,Original +1973,0.5423728813559322,0.37528248587570623,-8.13082423873902,0.0038896389696946266,Original +1974,0.45,0.3050847457627119,-8.55,0.0033619268709983345,Original +1975,0.4406779661016949,0.3840395480225989,-6.277129309215832,0.00816330194750534,Original +1976,0.4166666666666667,0.3771186440677966,-1.974967724404075,0.14275464845123892,Original 
+1977,0.4406779661016949,0.36730225988700566,-2.6278882027424713,0.07846735545649752,Original +1978,0.6440677966101694,0.4809322033898305,-5.744562646538028,0.010477088755276323,Original +1979,0.4576271186440678,0.4137005649717514,-2.715044307099425,0.0728534025239453,Original +1980,0.559322033898305,0.2867937853107345,-3.7484025543573445,0.03315263271031142,Original +1981,0.4745762711864407,0.3584039548022599,-3.4358556656684405,0.041360824576078994,Original +1982,0.423728813559322,0.32895480225988705,-3.853459154520629,0.03086833989707754,Original +1983,0.43333333333333335,0.3135593220338983,-4.2613603366990604,0.0237024761862663,Original +1984,0.6101694915254238,0.4471751412429379,-10.661446386757238,0.0017637451315993058,Original +1985,0.3898305084745763,0.354590395480226,-2.6581313276202945,0.07646057355190569,Original +1986,0.5166666666666667,0.42372881355932207,-2.2081034216541404,0.11430368101220882,Original +1987,0.4406779661016949,0.3291666666666667,-4.203856330713102,0.024572283074481203,Original +1988,0.5254237288135594,0.400635593220339,-6.380724773314517,0.0077935062804952064,Original +1989,0.5084745762711864,0.35861581920903957,-2.999030425264594,0.05771345344001608,Original +1990,0.3728813559322034,0.295409604519774,-3.600128581629112,0.03675886811551069,Original +1991,0.559322033898305,0.40508474576271186,-8.389086394729976,0.003552594693743783,Original +1992,0.4745762711864407,0.3540254237288135,-2.4793636360304507,0.08932026186301084,Original +1993,0.4915254237288136,0.42189265536723164,-3.2416676813361778,0.04779043382080003,Original +1994,0.4406779661016949,0.3673728813559322,-2.8797359104311173,0.06354272267077887,Original +1995,0.4,0.3474576271186441,-2.978383660774621,0.05867285324441682,Original +1996,0.4406779661016949,0.3712570621468927,-2.386590270664442,0.09703914637860833,Original +1997,0.4745762711864407,0.4093926553672317,-2.226010109872328,0.11241274845462867,Original +1998,0.4406779661016949,0.38834745762711864,-3.1550201885686455,0.051066984905793825,Original +1999,0.5423728813559322,0.37097457627118646,-5.074685831579439,0.014778639324226026,Original +2000,0.48333333333333334,0.22814207650273222,-21.334991021776784,0.00022530414214046572,Watermarked +2001,0.4098360655737705,0.23333333333333334,-4.992251154606664,0.015458009685690827,Watermarked +2002,0.65,0.34815573770491803,-6.977885499593617,0.0060406875581721555,Watermarked +2003,0.6333333333333333,0.2573087431693989,-17.794177111160675,0.0003870090924213516,Watermarked +2004,0.4,0.3151639344262295,-1.713189822924711,0.18519433572899746,Watermarked +2005,0.45901639344262296,0.20416666666666666,-8.101361023294555,0.003930735409185079,Watermarked +2006,0.4166666666666667,0.27424863387978143,-2.647512144273123,0.07715790266759627,Watermarked +2007,0.5333333333333333,0.28599726775956286,-9.817142706536112,0.002246603044354501,Watermarked +2008,0.5245901639344263,0.2375,-5.570367388129549,0.011418116056075428,Watermarked +2009,0.4666666666666667,0.2775273224043716,-4.727075685541707,0.01793917650756737,Watermarked +2010,0.4098360655737705,0.25,-3.714254520543179,0.033941551397426564,Watermarked +2011,0.6333333333333333,0.24904371584699453,-14.463091326070671,0.0007165776065197027,Watermarked +2012,0.5081967213114754,0.275,-5.83498532451519,0.01002850287511932,Watermarked +2013,0.5245901639344263,0.23750000000000002,-15.536893060799459,0.0005793474370025991,Watermarked +2014,0.7,0.32745901639344266,-6.77625507348341,0.006568385846444286,Watermarked 
+2015,0.4426229508196721,0.21666666666666667,-4.119026835630454,0.025932160329463834,Watermarked +2016,0.6721311475409836,0.23333333333333334,-9.613578441019637,0.0023886490069146135,Watermarked +2017,0.38333333333333336,0.2775956284153005,-2.196141651943659,0.11558815206376069,Watermarked +2018,0.43333333333333335,0.29480874316939887,-3.9127157656292244,0.029668656491470317,Watermarked +2019,0.3770491803278688,0.2125,-4.70897478231848,0.018126865049367218,Watermarked +2020,0.6166666666666667,0.3523907103825137,-5.470026246143757,0.012010754748633298,Watermarked +2021,0.3442622950819672,0.2875,-2.0013563154719005,0.13914298161809877,Watermarked +2022,0.35,0.22786885245901642,-2.9204148617045544,0.06147547219131401,Watermarked +2023,0.43333333333333335,0.2573770491803279,-6.707544913199492,0.0067620282205820385,Watermarked +2024,0.6229508196721312,0.20833333333333334,-7.9670319147285165,0.004125551420928103,Watermarked +2025,0.3770491803278688,0.24583333333333332,-12.513573727485,0.0011000978118262336,Watermarked +2026,0.48333333333333334,0.31953551912568307,-7.4329764222856545,0.005039460040419282,Watermarked +2027,0.4918032786885246,0.2,-10.721537070870632,0.0017348546540608164,Watermarked +2028,0.45,0.1825136612021858,-9.313256255544996,0.002620765891462303,Watermarked +2029,0.45,0.27759562841530055,-4.406020658142731,0.02168373286648725,Watermarked +2030,0.5333333333333333,0.21967213114754097,-6.698633516057464,0.006787690665758875,Watermarked +2031,0.45,0.2444672131147541,-5.639134297794538,0.011033909197241593,Watermarked +2032,0.4,0.23224043715846998,-9.669790958355271,0.0023482624188435656,Watermarked +2033,0.5,0.21530054644808744,-4.776258392255463,0.017441795571057156,Watermarked +2034,0.5737704918032787,0.13749999999999998,-41.60551556348084,3.055734072793683e-05,Watermarked +2035,0.5245901639344263,0.19583333333333333,-11.940183637404086,0.001263509863921225,Watermarked +2036,0.36666666666666664,0.29439890710382516,-2.2491233682903635,0.11002736160816107,Watermarked +2037,0.55,0.21598360655737703,-12.468162549596142,0.0011119786230779946,Watermarked +2038,0.5409836065573771,0.27083333333333337,-8.587746675724997,0.0033191706279838665,Watermarked +2039,0.45,0.26584699453551913,-3.724628638572125,0.03369936057429459,Watermarked +2040,0.5166666666666667,0.34460382513661203,-3.314218356932012,0.0452491572848592,Watermarked +2041,0.5573770491803278,0.20416666666666666,-12.828316972577708,0.001022214116280989,Watermarked +2042,0.4166666666666667,0.27759562841530055,-4.08198313178942,0.026556436001686043,Watermarked +2043,0.6229508196721312,0.21250000000000002,-32.83606557377048,6.20825070326001e-05,Watermarked +2044,0.43333333333333335,0.24849726775956282,-3.748296362699404,0.03315504943587395,Watermarked +2045,0.5245901639344263,0.2625,-10.53248144497122,0.0018279382457190715,Watermarked +2046,0.4426229508196721,0.29166666666666663,-2.5878220140515227,0.0812271381568774,Watermarked +2047,0.5166666666666667,0.21598360655737703,-8.750839688124422,0.003142480189931068,Watermarked +2048,0.6333333333333333,0.232103825136612,-8.35174130824408,0.0035988783002935975,Watermarked +2049,0.5901639344262295,0.22083333333333333,-13.413790344368145,0.0008957658359722933,Watermarked +2050,0.5,0.27814207650273226,-3.820785157614083,0.03155653621002948,Watermarked +2051,0.7,0.20744535519125684,-28.591452014534944,9.39404974108921e-05,Watermarked +2052,0.39344262295081966,0.29583333333333334,-9.308639696291548,0.0026245624151365297,Watermarked 
+2053,0.45,0.20348360655737702,-5.6752883391613915,0.010838689050252247,Watermarked +2054,0.35,0.26939890710382514,-2.481709453531588,0.08913501383686977,Watermarked +2055,0.45,0.28592896174863386,-3.800322116899045,0.031997583361784786,Watermarked +2056,0.48333333333333334,0.2533469945355191,-6.384490208675115,0.007780483735954091,Watermarked +2057,0.4426229508196721,0.3333333333333333,-5.678855106783206,0.010819675519646264,Watermarked +2058,0.38333333333333336,0.26536885245901637,-2.8854448330676328,0.06324741595265697,Watermarked +2059,0.4918032786885246,0.2583333333333333,-6.9322595553517825,0.006155066763755107,Watermarked +2060,0.4426229508196721,0.3125,-7.574268290069089,0.004773601555369254,Watermarked +2061,0.6333333333333333,0.27377049180327867,-36.553205244976375,4.503243730199633e-05,Watermarked +2062,0.6,0.14508196721311475,-16.724450142912833,0.00046542921319575733,Watermarked +2063,0.6721311475409836,0.2791666666666667,-14.728977904018867,0.000678879499435165,Watermarked +2064,0.4166666666666667,0.30293715846994534,-4.392324491038346,0.021865089257387872,Watermarked +2065,0.4918032786885246,0.26249999999999996,-2.9860360155946784,0.05831495178179603,Watermarked +2066,0.5666666666666667,0.18237704918032788,-16.083566531034208,0.0005227722151560427,Watermarked +2067,0.45,0.24002732240437158,-3.3909481332454887,0.04274501456307789,Watermarked +2068,0.35,0.25703551912568307,-2.6079704990661914,0.07982458937806818,Watermarked +2069,0.38333333333333336,0.2782103825136612,-3.2919830028303934,0.04600941654903949,Watermarked +2070,0.5,0.27342896174863385,-3.9770819524294616,0.02843255328181686,Watermarked +2071,0.6721311475409836,0.22083333333333333,-8.141189027892935,0.0038753168087557023,Watermarked +2072,0.45901639344262296,0.27499999999999997,-7.102306152917264,0.005742682031524343,Watermarked +2073,0.45,0.21974043715846997,-4.850332934764336,0.016725995889598343,Watermarked +2074,0.4166666666666667,0.307103825136612,-12.422543126716244,0.0011240861310550566,Watermarked +2075,0.7049180327868853,0.19583333333333333,-12.238447225159446,0.001174764152702687,Watermarked +2076,0.45901639344262296,0.2416666666666667,-4.10684476131458,0.026135349573198702,Watermarked +2077,0.6666666666666666,0.21591530054644809,-21.638546889559418,0.00021600181591014574,Watermarked +2078,0.39344262295081966,0.25416666666666665,-3.491255808134439,0.0397307943380083,Watermarked +2079,0.38333333333333336,0.24849726775956282,-2.5061607287884398,0.08723186353716599,Watermarked +2080,0.45901639344262296,0.24166666666666667,-5.438466110898458,0.012205447523675413,Watermarked +2081,0.36065573770491804,0.275,-1.7374154679173113,0.18070726370520965,Watermarked +2082,0.5166666666666667,0.25669398907103824,-3.5751586418022976,0.03741471383610712,Watermarked +2083,0.6721311475409836,0.2791666666666667,-9.089163278771835,0.002813782076578305,Watermarked +2084,0.45901639344262296,0.2708333333333333,-5.018214936247723,0.01523972222476046,Watermarked +2085,0.6557377049180327,0.23333333333333334,-19.631581158153004,0.00028877725744096686,Watermarked +2086,0.43333333333333335,0.26133879781420766,-3.406748909038209,0.04225136416729629,Watermarked +2087,0.36666666666666664,0.22370218579234974,-3.560576108205079,0.03780464605083627,Watermarked +2088,0.6,0.29077868852459016,-8.670001234457226,0.003228460547141703,Watermarked +2089,0.6721311475409836,0.2791666666666667,-11.945500838297065,0.001261851176283919,Watermarked +2090,0.4166666666666667,0.2780054644808743,-7.059001645570319,0.005844158386152648,Watermarked 
+2091,0.6885245901639344,0.275,-14.961882623555587,0.0006479949161931086,Watermarked +2092,0.5081967213114754,0.24583333333333335,-10.170122389206956,0.002025718899581995,Watermarked +2093,0.5737704918032787,0.22916666666666669,-18.649446940693135,0.0003365083085818159,Watermarked +2094,0.45,0.24890710382513662,-17.77843472912827,0.0003880303178033284,Watermarked +2095,0.5573770491803278,0.2,-11.19804462246161,0.001526562128286031,Watermarked +2096,0.4,0.21154371584699455,-6.028584041580653,0.009149467578500266,Watermarked +2097,0.39344262295081966,0.2375,-5.077417805154715,0.014756796916773422,Watermarked +2098,0.43333333333333335,0.2695355191256831,-4.505004550324794,0.02042928010369098,Watermarked +2099,0.48333333333333334,0.30327868852459017,-5.139479033055133,0.014271753686067527,Watermarked +2100,0.5666666666666667,0.23162568306010928,-5.174508393880651,0.01400713962990326,Watermarked +2101,0.3770491803278688,0.2333333333333333,-2.6611003960675528,0.07626699967069282,Watermarked +2102,0.4918032786885246,0.2375,-8.083990107136325,0.003955234173311845,Watermarked +2103,0.5409836065573771,0.16666666666666669,-20.79300879817042,0.00024328442858722197,Watermarked +2104,0.5737704918032787,0.22916666666666663,-7.437123752141218,0.0050313817019966775,Watermarked +2105,0.5901639344262295,0.23750000000000002,-20.5280562633211,0.00025277231465770014,Watermarked +2106,0.6721311475409836,0.35,-8.241430969943103,0.00374033200083373,Watermarked +2107,0.6065573770491803,0.1958333333333333,-10.95264116575592,0.0016294169815328763,Watermarked +2108,0.36065573770491804,0.26666666666666666,-2.270928029445486,0.10783275809661891,Watermarked +2109,0.5081967213114754,0.2416666666666667,-11.07944631333403,0.0015751594215650242,Watermarked +2110,0.3770491803278688,0.25,-2.8812045893326337,0.063466587684043,Watermarked +2111,0.48333333333333334,0.19904371584699454,-12.288393956261835,0.0011607197809402983,Watermarked +2112,0.5333333333333333,0.23633879781420766,-4.584804480637148,0.019485678388664683,Watermarked +2113,0.4,0.24881602914389797,-3.563314918926205,0.07052714913781105,Watermarked +2114,0.39344262295081966,0.19583333333333333,-4.628326083672306,0.018994819352054024,Watermarked +2115,0.5,0.298155737704918,-4.033530534554061,0.027402529576258054,Watermarked +2116,0.48333333333333334,0.2693306010928962,-6.652844379359568,0.006921592104201834,Watermarked +2117,0.4,0.22404371584699456,-12.321363422263417,0.0011515711663196136,Watermarked +2118,0.5,0.2528688524590164,-9.302810429704943,0.002629366644977586,Watermarked +2119,0.55,0.2778688524590164,-4.536961611771408,0.020044437314205348,Watermarked +2120,0.5833333333333334,0.29453551912568304,-13.407344687092644,0.0008970413965513573,Watermarked +2121,0.8360655737704918,0.22083333333333333,-33.295454627318755,5.9553511839070765e-05,Watermarked +2122,0.6,0.257172131147541,-22.28671843401477,0.0001977851067158647,Watermarked +2123,0.6721311475409836,0.14166666666666666,-49.30752240433902,1.836912647385703e-05,Watermarked +2124,0.7868852459016393,0.19166666666666665,-18.866186551774142,0.00032511960811709917,Watermarked +2125,0.7049180327868853,0.17500000000000002,-13.259465772580212,0.0009269807118292367,Watermarked +2126,0.6333333333333333,0.2614071038251366,-11.56614172997057,0.00138784136278078,Watermarked +2127,0.4918032786885246,0.26666666666666666,-3.4878013959288654,0.039830015747165624,Watermarked +2128,0.48333333333333334,0.22418032786885247,-17.367164105589854,0.0004160299549552091,Watermarked 
+2129,0.36666666666666664,0.23654371584699457,-7.528860163609132,0.0048569805574603395,Watermarked +2130,0.5081967213114754,0.22916666666666666,-7.321262735084583,0.005263650485292702,Watermarked +2131,0.5737704918032787,0.25,-9.922022842122464,0.0021777725182429894,Watermarked +2132,0.6885245901639344,0.14166666666666666,-27.567153326380936,0.00010477109294231701,Watermarked +2133,0.7704918032786885,0.19999999999999998,-17.482855438271482,0.00040788898005848117,Watermarked +2134,0.43333333333333335,0.2941939890710382,-4.012426942160979,0.027781897561995554,Watermarked +2135,0.8524590163934426,0.275,-22.287637793898487,0.00019776074953457593,Watermarked +2136,0.4098360655737705,0.3,-3.441600870206092,0.04118787585017441,Watermarked +2137,0.48333333333333334,0.3110655737704918,-4.021031471649375,0.027626407165013953,Watermarked +2138,0.43333333333333335,0.28572404371584703,-3.0768255794880175,0.054270837524734016,Watermarked +2139,0.55,0.2030737704918033,-11.28407580595368,0.0014925480755074861,Watermarked +2140,0.3770491803278688,0.26666666666666666,-2.4183597074545036,0.09430740171744867,Watermarked +2141,0.36666666666666664,0.24453551912568308,-3.448799623718753,0.0409724672224204,Watermarked +2142,0.36666666666666664,0.21550546448087432,-2.656953690777749,0.07653752004862353,Watermarked +2143,0.639344262295082,0.25416666666666665,-36.732973581097305,4.437567678781254e-05,Watermarked +2144,0.4426229508196721,0.2583333333333333,-9.290054918133373,0.002639919880417069,Watermarked +2145,0.639344262295082,0.23333333333333334,-9.555035305421034,0.0024316941386992286,Watermarked +2146,0.45901639344262296,0.25,-5.347493377444,0.012790241471607905,Watermarked +2147,0.55,0.2739071038251366,-24.893769141544713,0.00014212863367693852,Watermarked +2148,0.35,0.26994535519125684,-3.13501995090005,0.051863239303626886,Watermarked +2149,0.7213114754098361,0.2875,-13.79033606511491,0.0008252499042687573,Watermarked +2150,0.5409836065573771,0.26666666666666666,-3.469865323288583,0.04035029930545199,Watermarked +2151,0.5,0.2864071038251366,-9.166199952585503,0.00274536805342564,Watermarked +2152,0.45,0.27363387978142073,-4.238851608241817,0.02403813124545163,Watermarked +2153,0.6721311475409836,0.23750000000000002,-12.680742877830454,0.001057786978105176,Watermarked +2154,0.5573770491803278,0.21250000000000002,-5.6891750976260695,0.010764907709515388,Watermarked +2155,0.5901639344262295,0.22916666666666669,-5.917936455036488,0.009638872740618382,Watermarked +2156,0.5409836065573771,0.27499999999999997,-7.078346628927314,0.005798537274283227,Watermarked +2157,0.5,0.2573087431693989,-12.130613891363168,0.0012058641450002766,Watermarked +2158,0.38333333333333336,0.25314207650273224,-3.7198098893146967,0.03381158141645187,Watermarked +2159,0.36666666666666664,0.29453551912568304,-4.000535767249803,0.02799863936714751,Watermarked +2160,0.45,0.1867486338797814,-18.377401251596062,0.00035156723441787536,Watermarked +2161,0.5737704918032787,0.2833333333333333,-12.870123433871873,0.0010124261840382058,Watermarked +2162,0.4426229508196721,0.2625,-4.652549903587426,0.018728552464152025,Watermarked +2163,0.4666666666666667,0.24863387978142074,-5.629498300891231,0.01108671624446389,Watermarked +2164,0.47540983606557374,0.22916666666666669,-4.253993084342693,0.023811667807213974,Watermarked +2165,0.4918032786885246,0.22916666666666666,-10.55444127076214,0.0018167912040410702,Watermarked +2166,0.45,0.28237704918032785,-9.511627906976749,0.0024642759907036603,Watermarked 
+2167,0.5573770491803278,0.29166666666666663,-55.22686591346667,1.307697160598242e-05,Watermarked +2168,0.5081967213114754,0.23333333333333334,-5.497267759562843,0.011845956731503078,Watermarked +2169,0.43333333333333335,0.2941256830601093,-2.1752046582440823,0.1178783573581168,Watermarked +2170,0.4098360655737705,0.325,-3.3934426229508206,0.04266659593484531,Watermarked +2171,0.5573770491803278,0.18333333333333335,-10.579553917424954,0.0018041539811479287,Watermarked +2172,0.4,0.20737704918032787,-4.581868223019867,0.019519384720467742,Watermarked +2173,0.5,0.31489071038251365,-3.002786519391192,0.05754103974524036,Watermarked +2174,0.31666666666666665,0.23674863387978146,-3.2609722394321854,0.04709695971232935,Watermarked +2175,0.48333333333333334,0.2693306010928962,-5.216676982893284,0.013697022421412816,Watermarked +2176,0.5833333333333334,0.30703551912568305,-26.318257342843076,0.00012035017922562646,Watermarked +2177,0.6,0.27363387978142073,-11.501316579943088,0.0014110204730729838,Watermarked +2178,0.4918032786885246,0.28750000000000003,-6.210497654675213,0.00841340269771356,Watermarked +2179,0.5901639344262295,0.20833333333333334,-23.92854336374633,0.00015995510518182317,Watermarked +2180,0.36065573770491804,0.3,-2.5734050069412073,0.08224938731599696,Watermarked +2181,0.47540983606557374,0.2833333333333333,-3.914708631165495,0.029629359927274702,Watermarked +2182,0.5081967213114754,0.3,-5.782581278907251,0.010285339228476416,Watermarked +2183,0.4666666666666667,0.2489754098360656,-9.227144083318263,0.002692797704338326,Watermarked +2184,0.5901639344262295,0.275,-4.488369320415114,0.02063340350086202,Watermarked +2185,0.5409836065573771,0.2791666666666667,-7.354405200045387,0.00519578998775744,Watermarked +2186,0.4098360655737705,0.2708333333333333,-9.766999910411144,0.002280530521151582,Watermarked +2187,0.3333333333333333,0.22659380692167577,-9.085817324099237,0.011897809590563803,Watermarked +2188,0.5573770491803278,0.25416666666666665,-6.766299230685365,0.0065959876919741,Watermarked +2189,0.7213114754098361,0.27499999999999997,-8.885189155139434,0.0030061660806281146,Watermarked +2190,0.48333333333333334,0.34446721311475414,-3.3256460363533833,0.04486462240497135,Watermarked +2191,0.65,0.26584699453551913,-15.779833617982495,0.0005532481835581896,Watermarked +2192,0.5666666666666667,0.2067622950819672,-6.043879310010205,0.009084400202355151,Watermarked +2193,0.6229508196721312,0.225,-9.957417838496047,0.0021551750735026,Watermarked +2194,0.4426229508196721,0.2583333333333333,-3.3466253328190545,0.04416940727951125,Watermarked +2195,0.45,0.2692622950819672,-4.2061085388157045,0.02453744768447861,Watermarked +2196,0.45,0.23654371584699452,-14.867978061029115,0.0006602177614744704,Watermarked +2197,0.6229508196721312,0.1875,-34.83606557377048,5.20111686902031e-05,Watermarked +2198,0.5833333333333334,0.20710382513661202,-10.283901901516982,0.001960684970337362,Watermarked +2199,0.4666666666666667,0.28592896174863386,-4.828799478606252,0.01693007546409189,Watermarked +2200,0.7213114754098361,0.3416666666666667,-7.851608784383327,0.004303261311903815,Watermarked +2201,0.6065573770491803,0.37916666666666665,-6.507337920439277,0.007370864736714179,Watermarked +2202,0.6666666666666666,0.489275956284153,-6.06668077104039,0.00898852350687363,Watermarked +2203,0.5833333333333334,0.373155737704918,-5.4116120970679615,0.01237436958129694,Watermarked +2204,0.4918032786885246,0.3875,-3.170655272045821,0.050455235210516786,Watermarked 
+2205,0.5901639344262295,0.4125,-5.932056727240316,0.0095745359301512,Watermarked +2206,0.6065573770491803,0.39999999999999997,-4.741062246648036,0.017795875960800792,Watermarked +2207,0.6557377049180327,0.39583333333333337,-6.819438116767888,0.006450408520510395,Watermarked +2208,0.6166666666666667,0.41427595628415304,-3.5454803247028934,0.03821378114440935,Watermarked +2209,0.5737704918032787,0.37083333333333335,-10.982657455064276,0.0016163522474045274,Watermarked +2210,0.5833333333333334,0.4810109289617486,-2.397936955599478,0.09605254927181417,Watermarked +2211,0.6557377049180327,0.39166666666666666,-4.380390416901878,0.022024716850259,Watermarked +2212,0.5245901639344263,0.36249999999999993,-5.412060981830556,0.012371520890864776,Watermarked +2213,0.5666666666666667,0.4185792349726776,-3.1318929933163084,0.05198914240331166,Watermarked +2214,0.6,0.3980191256830601,-6.2294612621620695,0.008341210430613992,Watermarked +2215,0.55,0.43142076502732246,-11.855239270966623,0.00129040506090979,Watermarked +2216,0.5166666666666667,0.4312158469945355,-2.769971026007625,0.06957017387261798,Watermarked +2217,0.5666666666666667,0.3818306010928962,-4.847117449604144,0.016756266049925205,Watermarked +2218,0.4666666666666667,0.4064890710382514,-1.6875505812729021,0.19008194720260654,Watermarked +2219,0.5573770491803278,0.4,-8.742170122442447,0.003151555065586373,Watermarked +2220,0.6,0.47274590163934427,-3.8037548196566555,0.03192304772258143,Watermarked +2221,0.6666666666666666,0.3689207650273224,-6.200033744114341,0.00845358911672797,Watermarked +2222,0.6229508196721312,0.425,-12.405173285892113,0.0011287421498101565,Watermarked +2223,0.5833333333333334,0.3605874316939891,-5.648887116271631,0.010980796736842062,Watermarked +2224,0.5833333333333334,0.39364754098360655,-4.293991625472901,0.023226546554816672,Watermarked +2225,0.5833333333333334,0.40614754098360656,-4.422690436146169,0.02146562039496988,Watermarked +2226,0.6166666666666667,0.38148907103825136,-8.30926126344939,0.0036525000991545153,Watermarked +2227,0.6065573770491803,0.38749999999999996,-3.4641024571780417,0.04051930159431608,Watermarked +2228,0.6166666666666667,0.3691939890710383,-6.170366319105602,0.008568905214827621,Watermarked +2229,0.5573770491803278,0.4041666666666667,-3.05362313626129,0.05526992091860083,Watermarked +2230,0.6833333333333333,0.42711748633879776,-7.960605911691153,0.004135187577765976,Watermarked +2231,0.6666666666666666,0.45163934426229513,-3.2153935344715583,0.04875516994578165,Watermarked +2232,0.55,0.43995901639344265,-3.5143217545081513,0.03907627630046548,Watermarked +2233,0.6666666666666666,0.3816256830601093,-11.100144808896685,0.0015665315397856033,Watermarked +2234,0.5333333333333333,0.4523224043715847,-2.5642179367732165,0.082909074165789,Watermarked +2235,0.55,0.4187158469945355,-3.611089663775107,0.03647560832552664,Watermarked +2236,0.5666666666666667,0.4362021857923497,-2.402996731954332,0.09561654218317737,Watermarked +2237,0.75,0.44371584699453553,-9.529761387107365,0.0024505949878125062,Watermarked +2238,0.4666666666666667,0.42315573770491804,-2.2737870570417353,0.10754893601572461,Watermarked +2239,0.6557377049180327,0.4083333333333333,-7.346004713131698,0.005212880633794737,Watermarked +2240,0.5737704918032787,0.35833333333333334,-4.3698631465799025,0.022166781498566,Watermarked +2241,0.6885245901639344,0.4,-16.027311891144493,0.0005282450921087477,Watermarked +2242,0.5,0.40642076502732244,-3.4412842582299485,0.04119738288603227,Watermarked 
+2243,0.5409836065573771,0.36666666666666664,-3.8190928053365694,0.031592715134547304,Watermarked +2244,0.5573770491803278,0.4458333333333333,-2.433681073025334,0.09302352271601963,Watermarked +2245,0.65,0.3774590163934426,-8.635338782373333,0.00326628135311723,Watermarked +2246,0.55,0.41448087431693986,-2.7772482375001317,0.06914909220734049,Watermarked +2247,0.5573770491803278,0.3916666666666667,-2.4539091507908326,0.09136101012842844,Watermarked +2248,0.6229508196721312,0.425,-3.8366329971259767,0.031220308656298136,Watermarked +2249,0.5245901639344263,0.425,-2.4919181614361894,0.08833429775357093,Watermarked +2250,0.5833333333333334,0.4394808743169399,-3.2347368286888813,0.048042559031082566,Watermarked +2251,0.5,0.41051912568306015,-3.2435150269952304,0.04772351523132277,Watermarked +2252,0.5737704918032787,0.4291666666666667,-3.4762904569519018,0.04016293189095533,Watermarked +2253,0.5166666666666667,0.4148224043715847,-5.582808024011134,0.011347326397319939,Watermarked +2254,0.6,0.39385245901639343,-4.622249873571463,0.019062378557272608,Watermarked +2255,0.55,0.42288251366120216,-2.2047766760939767,0.11465918253571977,Watermarked +2256,0.48333333333333334,0.39392076502732243,-3.234755633152603,0.048041872703208165,Watermarked +2257,0.5333333333333333,0.4519808743169399,-1.7442611130916705,0.17946189402091586,Watermarked +2258,0.6065573770491803,0.3791666666666667,-4.961251862891206,0.01572399871994463,Watermarked +2259,0.7213114754098361,0.3041666666666667,-10.94515337164297,0.0016326978989827164,Watermarked +2260,0.48333333333333334,0.4235655737704918,-2.1896107986499023,0.11629675807965059,Watermarked +2261,0.5333333333333333,0.43948087431693983,-2.3754283562972716,0.09802174273100718,Watermarked +2262,0.6721311475409836,0.4208333333333333,-4.533288093479356,0.020088194824742056,Watermarked +2263,0.5833333333333334,0.3310109289617486,-3.076310185278194,0.05429278214985614,Watermarked +2264,0.55,0.43586065573770494,-3.3338420681873173,0.04459138016214428,Watermarked +2265,0.5333333333333333,0.4228825136612022,-3.399472771732523,0.042477781423474185,Watermarked +2266,0.5901639344262295,0.4666666666666667,-2.9838938061318583,0.0584148664790183,Watermarked +2267,0.639344262295082,0.3833333333333333,-8.868478725093105,0.003022689256232251,Watermarked +2268,0.6666666666666666,0.4437841530054645,-11.678549121160296,0.0013488323998950805,Watermarked +2269,0.5166666666666667,0.4271174863387978,-3.7411614919740814,0.03331794416540563,Watermarked +2270,0.5666666666666667,0.4189207650273224,-4.8323202058438195,0.016896487730970963,Watermarked +2271,0.5666666666666667,0.344672131147541,-3.571560837934814,0.03751043917785419,Watermarked +2272,0.5833333333333334,0.3942622950819672,-7.793998738706418,0.004395749962862252,Watermarked +2273,0.5833333333333334,0.47677595628415304,-2.363289165755069,0.09910414730873617,Watermarked +2274,0.55,0.43586065573770494,-4.152645529463587,0.025381891014694326,Watermarked +2275,0.5166666666666667,0.40225409836065573,-4.3808607027131465,0.022018397914196755,Watermarked +2276,0.639344262295082,0.41250000000000003,-7.574148554850093,0.0047738189211758006,Watermarked +2277,0.5737704918032787,0.45833333333333337,-2.975985583098297,0.058785578261159106,Watermarked +2278,0.5166666666666667,0.4522540983606557,-3.550343434787011,0.03808136417851271,Watermarked +2279,0.5409836065573771,0.4,-3.9876185693143014,0.028236554956465203,Watermarked +2280,0.6557377049180327,0.3416666666666667,-7.253159939163658,0.005406805253936257,Watermarked 
+2281,0.6229508196721312,0.38749999999999996,-3.8840535871493516,0.030241314676875894,Watermarked +2282,0.7666666666666667,0.36065573770491804,-12.408708085543239,0.001127792567025593,Watermarked +2283,0.5573770491803278,0.35000000000000003,-6.992150468759068,0.006005502007535316,Watermarked +2284,0.5166666666666667,0.4148907103825137,-9.202675289866411,0.0027137429423814024,Watermarked +2285,0.6666666666666666,0.42288251366120216,-4.288438027587506,0.023306668223673156,Watermarked +2286,0.45901639344262296,0.3666666666666667,-2.327678697056396,0.10236424529812158,Watermarked +2287,0.7213114754098361,0.38333333333333336,-8.1660987570467,0.0038411801830783828,Watermarked +2288,0.6166666666666667,0.43196721311475417,-3.728410507789727,0.03361162082090968,Watermarked +2289,0.5409836065573771,0.38333333333333336,-6.688534635157998,0.0068169278980430605,Watermarked +2290,0.5166666666666667,0.39385245901639343,-3.766631201193383,0.03274110546757008,Watermarked +2291,0.5333333333333333,0.3813524590163934,-3.0906245783493898,0.05368742829459938,Watermarked +2292,0.5737704918032787,0.4,-4.7424625152411375,0.01778161142649878,Watermarked +2293,0.6,0.38162568306010936,-6.039077465113362,0.009104762087317147,Watermarked +2294,0.6229508196721312,0.5041666666666667,-3.021862808739443,0.05667530663209718,Watermarked +2295,0.55,0.4146857923497268,-3.8865083819708626,0.030191715950954285,Watermarked +2296,0.55,0.476912568306011,-3.077135925982908,0.0542576288739498,Watermarked +2297,0.6885245901639344,0.39583333333333337,-15.840015885207144,0.0005470254169839911,Watermarked +2298,0.7540983606557377,0.39999999999999997,-11.93916601780205,0.0012638276378236352,Watermarked +2299,0.5166666666666667,0.40218579234972673,-2.5598239902355586,0.08322688778357085,Watermarked +2300,0.7213114754098361,0.36666666666666664,-11.112432660516188,0.0015614391657328315,Watermarked +2301,0.6065573770491803,0.45416666666666666,-2.293338139417192,0.10563206329335763,Watermarked +2302,0.47540983606557374,0.37083333333333335,-2.937540923889767,0.06063034832622183,Watermarked +2303,0.6065573770491803,0.4,-7.838301347827194,0.004324395459725133,Watermarked +2304,0.6,0.39344262295081966,-2.704003786107557,0.0735363944189725,Watermarked +2305,0.5245901639344263,0.4291666666666667,-2.2939904432507365,0.10556882390727751,Watermarked +2306,0.639344262295082,0.43333333333333335,-5.721871764168063,0.010593767203391514,Watermarked +2307,0.6557377049180327,0.4375,-9.510009673164706,0.002465501797241344,Watermarked +2308,0.5333333333333333,0.4521174863387978,-3.296192817472393,0.04586424145751324,Watermarked +2309,0.5333333333333333,0.373155737704918,-3.7418090166847584,0.03330311842719912,Watermarked +2310,0.5666666666666667,0.39002732240437155,-5.154995937878714,0.014153740439881685,Watermarked +2311,0.5666666666666667,0.39767759562841526,-2.5958139567349727,0.08066720661779468,Watermarked +2312,0.5666666666666667,0.4650273224043716,-3.1399952826589113,0.051663705233341434,Watermarked +2313,0.6166666666666667,0.3982923497267759,-4.422992997264673,0.021461687900979676,Watermarked +2314,0.6166666666666667,0.38934426229508196,-3.7175780367875846,0.03386371975978312,Watermarked +2315,0.48333333333333334,0.4191256830601093,-2.474712698219785,0.08968894024591773,Watermarked +2316,0.6885245901639344,0.4208333333333334,-6.523182985505984,0.007320113516282464,Watermarked +2317,0.639344262295082,0.4375,-5.669780163353537,0.010868137779207396,Watermarked +2318,0.6,0.34795081967213115,-4.836426389706701,0.01685742415885029,Watermarked 
+2319,0.47540983606557374,0.27499999999999997,-6.209471659985009,0.008417331899586392,Watermarked +2320,0.6666666666666666,0.4395491803278688,-6.062960519560702,0.009004075645090898,Watermarked +2321,0.5166666666666667,0.38633879781420766,-3.142740772481161,0.051554012753716585,Watermarked +2322,0.5166666666666667,0.3895491803278689,-2.647776278064909,0.07714046268331175,Watermarked +2323,0.5737704918032787,0.4375,-3.5755004055064217,0.037405636887797994,Watermarked +2324,0.5666666666666667,0.47288251366120215,-1.789503264890946,0.17147429025836847,Watermarked +2325,0.5833333333333334,0.3976092896174863,-2.616716369900444,0.07922508793306127,Watermarked +2326,0.6885245901639344,0.3458333333333334,-12.446270789290725,0.0011177670196131112,Watermarked +2327,0.5666666666666667,0.4144125683060109,-3.3818058014152355,0.04303398980095592,Watermarked +2328,0.4666666666666667,0.40225409836065573,-1.9432287224055769,0.14724565777265242,Watermarked +2329,0.639344262295082,0.3625,-6.568076588878649,0.007178794023354771,Watermarked +2330,0.5166666666666667,0.38989071038251366,-3.8433173783046315,0.031079871545519045,Watermarked +2331,0.5666666666666667,0.41058743169398904,-5.412428171026809,0.012369191290195588,Watermarked +2332,0.6065573770491803,0.4291666666666667,-4.446685303445383,0.021156625841990514,Watermarked +2333,0.6065573770491803,0.41666666666666663,-4.468875866263539,0.020875975290483854,Watermarked +2334,0.5166666666666667,0.4230191256830601,-2.1008219346441472,0.12646621272270542,Watermarked +2335,0.5833333333333334,0.40710382513661203,-4.176894082046699,0.02499431409045767,Watermarked +2336,0.65,0.41065573770491803,-6.713645725875071,0.0067445330979899526,Watermarked +2337,0.5573770491803278,0.425,-2.8842564548659144,0.06330874589287337,Watermarked +2338,0.6229508196721312,0.45416666666666666,-6.130104668195616,0.008728726955771435,Watermarked +2339,0.7049180327868853,0.37083333333333335,-8.031429676406121,0.004030601648600872,Watermarked +2340,0.6,0.4480191256830601,-3.7683602379617436,0.032702411681861464,Watermarked +2341,0.48333333333333334,0.41885245901639345,-1.6093008135111428,0.20592487686100341,Watermarked +2342,0.7049180327868853,0.37083333333333335,-6.267404269385084,0.008199190432380252,Watermarked +2343,0.55,0.4438524590163934,-5.090988202057138,0.014648921763255886,Watermarked +2344,0.5081967213114754,0.3083333333333333,-18.57762175574247,0.0003403993871209538,Watermarked +2345,0.45,0.40232240437158473,-2.44235164169727,0.09230641363259132,Watermarked +2346,0.48333333333333334,0.39801912568306014,-2.3401785284145156,0.10120527554802543,Watermarked +2347,0.5,0.3650273224043715,-3.293760329942494,0.0459480546557983,Watermarked +2348,0.4666666666666667,0.392167577413479,-1.8864910288697916,0.19986672981680156,Watermarked +2349,0.6065573770491803,0.45833333333333337,-4.811368296463101,0.017097654477865952,Watermarked +2350,0.5833333333333334,0.3773224043715847,-9.313966981164503,0.002620182057561568,Watermarked +2351,0.5666666666666667,0.4148224043715847,-4.737661283448938,0.017830583546120875,Watermarked +2352,0.639344262295082,0.4583333333333333,-4.666486269355321,0.018577558529827218,Watermarked +2353,0.6666666666666666,0.43920765027322406,-4.46624735000153,0.02090896608594079,Watermarked +2354,0.5166666666666667,0.4187158469945355,-1.9672355178709884,0.1438337407120422,Watermarked +2355,0.6065573770491803,0.4,-8.419686273573582,0.0035152556902358035,Watermarked +2356,0.5245901639344263,0.44166666666666665,-3.447065049817408,0.041024239621887254,Watermarked 
+2357,0.5573770491803278,0.45,-3.945284708663067,0.029034743151997156,Watermarked +2358,0.5333333333333333,0.41045081967213115,-3.0577746633809832,0.055089469506326795,Watermarked +2359,0.65,0.4066256830601093,-14.185315533396013,0.0007589934893947729,Watermarked +2360,0.5833333333333334,0.3400273224043716,-5.098953338267703,0.0145860830962082,Watermarked +2361,0.6166666666666667,0.4058743169398907,-3.305417706655741,0.04554814619701029,Watermarked +2362,0.5573770491803278,0.4125,-2.2397642698297733,0.11098584190703364,Watermarked +2363,0.6229508196721312,0.4041666666666667,-6.036355410889254,0.009116331366960937,Watermarked +2364,0.4918032786885246,0.4125,-3.074072401423919,0.05438819286539707,Watermarked +2365,0.6833333333333333,0.3903005464480874,-10.873299213730528,0.0016646323342411745,Watermarked +2366,0.6885245901639344,0.36249999999999993,-8.995168397182145,0.0029003471322019392,Watermarked +2367,0.5737704918032787,0.4208333333333333,-6.6644480933382715,0.006887332838366486,Watermarked +2368,0.5409836065573771,0.425,-2.678526658699412,0.07514300545940161,Watermarked +2369,0.5666666666666667,0.43558743169398906,-8.104167446872971,0.003926796249409396,Watermarked +2370,0.5833333333333334,0.4191256830601093,-4.775358960423008,0.017450729445301273,Watermarked +2371,0.7049180327868853,0.40416666666666673,-6.290438634313473,0.008114520022772869,Watermarked +2372,0.7333333333333333,0.3650956284153005,-6.627801681197047,0.0069962968928984955,Watermarked +2373,0.5333333333333333,0.42295081967213116,-2.3568974575090693,0.09967991090436344,Watermarked +2374,0.5333333333333333,0.41045081967213115,-4.3952411570175425,0.021826304453140925,Watermarked +2375,0.639344262295082,0.41250000000000003,-3.4877149221186383,0.03983250358959142,Watermarked +2376,0.6,0.4026639344262295,-4.885900809261954,0.016395862833158793,Watermarked +2377,0.7049180327868853,0.37916666666666665,-7.6296187670353355,0.004674501764841112,Watermarked +2378,0.6229508196721312,0.4458333333333333,-6.638664993068123,0.006963760953222826,Watermarked +2379,0.7166666666666667,0.41912568306010933,-17.09897132088851,0.00043575164889100897,Watermarked +2380,0.6833333333333333,0.44829234972677595,-5.766710653623844,0.010364820031007032,Watermarked +2381,0.639344262295082,0.4125,-5.5278108432930315,0.01166468471503337,Watermarked +2382,0.5901639344262295,0.425,-10.350486199201907,0.0019239067953852225,Watermarked +2383,0.5166666666666667,0.4314890710382514,-3.272785517027942,0.046678870059437404,Watermarked +2384,0.6065573770491803,0.39583333333333337,-10.70159618814971,0.0017443718008383813,Watermarked +2385,0.6557377049180327,0.4,-4.125557008441323,0.025824079480551702,Watermarked +2386,0.6666666666666666,0.39385245901639343,-8.973702530064598,0.0029206091675186585,Watermarked +2387,0.6666666666666666,0.41885245901639345,-12.364286659593969,0.0011398034481367935,Watermarked +2388,0.4426229508196721,0.4,-2.801466195764125,0.06777041139462144,Watermarked +2389,0.6721311475409836,0.4166666666666667,-7.828769077961604,0.004339618418212919,Watermarked +2390,0.6721311475409836,0.3458333333333333,-12.230197713801166,0.0011771054889125297,Watermarked +2391,0.5081967213114754,0.4041666666666667,-4.3462339347627506,0.02249000005004558,Watermarked +2392,0.6885245901639344,0.43333333333333335,-9.683827326548414,0.0023383191790880323,Watermarked +2393,0.5,0.4439207650273224,-2.1366037206586563,0.12224492481984166,Watermarked +2394,0.7704918032786885,0.41666666666666663,-23.25576104817017,0.00017417898616924437,Watermarked 
+2395,0.6,0.3983606557377049,-14.711044641258004,0.0006813380311103391,Watermarked +2396,0.5901639344262295,0.44166666666666665,-3.8282796790862137,0.0313969565110335,Watermarked +2397,0.55,0.3980874316939891,-5.915897985549872,0.009648207488286976,Watermarked +2398,0.6557377049180327,0.4291666666666667,-4.396123870194077,0.021814583998735686,Watermarked +2399,0.7704918032786885,0.375,-27.40047589022894,0.00010668857406321375,Watermarked +2400,0.6721311475409836,0.4416666666666667,-5.458841849494081,0.012079282996343666,Watermarked +2401,0.5737704918032787,0.39166666666666666,-7.569913857342007,0.004781514945095522,Watermarked +2402,0.5666666666666667,0.44781420765027324,-3.9576539242406734,0.02879855831206138,Watermarked +2403,0.5737704918032787,0.43333333333333335,-3.3050391298697224,0.04556106387896335,Watermarked +2404,0.6229508196721312,0.275,-20.058020052863462,0.0002708532723360041,Watermarked +2405,0.7,0.38196721311475407,-10.445379856844973,0.001873056150700024,Watermarked +2406,0.5833333333333334,0.35655737704918034,-5.1034502548832,0.01455076115150943,Watermarked +2407,0.8032786885245902,0.3791666666666667,-15.40340748070682,0.0005943876559434028,Watermarked +2408,0.5245901639344263,0.375,-4.047798691785726,0.02714981201550245,Watermarked +2409,0.5,0.44398907103825136,-3.5630178449635013,0.0377389945733,Watermarked +2410,0.7049180327868853,0.4666666666666667,-6.095443431544011,0.008869465900928337,Watermarked +2411,0.55,0.4312841530054645,-3.2330608226780675,0.0481037799750477,Watermarked +2412,0.5409836065573771,0.4416666666666667,-2.8905475311707214,0.06298491773711792,Watermarked +2413,0.55,0.4110655737704918,-5.966922212523975,0.009418071176895705,Watermarked +2414,0.75,0.36495901639344264,-15.214865853342191,0.0006165289089404656,Watermarked +2415,0.6721311475409836,0.3916666666666667,-33.655737704917975,5.766533760280624e-05,Watermarked +2416,0.5666666666666667,0.4230874316939891,-3.063975004864087,0.054821341360087615,Watermarked +2417,0.5409836065573771,0.42500000000000004,-7.268455569166599,0.005374210400514755,Watermarked +2418,0.6065573770491803,0.37083333333333335,-7.4933789577565,0.004923473774846551,Watermarked +2419,0.5737704918032787,0.48750000000000004,-4.140983606557374,0.025571051486798255,Watermarked +2420,0.6229508196721312,0.45416666666666666,-5.024423429015315,0.015188120980305192,Watermarked +2421,0.6666666666666666,0.34426229508196726,-10.200056186904932,0.0020083355064274685,Watermarked +2422,0.75,0.4724043715846995,-4.432281354966694,0.021341416432768157,Watermarked +2423,0.6,0.435724043715847,-7.812794009945692,0.004365289535356393,Watermarked +2424,0.5245901639344263,0.37083333333333335,-5.133817644374452,0.014315130666193885,Watermarked +2425,0.5573770491803278,0.42083333333333334,-5.4872114695067635,0.011906446056017608,Watermarked +2426,0.6885245901639344,0.44583333333333336,-5.613383822262529,0.011175768196393236,Watermarked +2427,0.6721311475409836,0.4666666666666667,-3.3346995013274205,0.044562916865554185,Watermarked +2428,0.6,0.3775273224043716,-6.826452300266374,0.006431509604653321,Watermarked +2429,0.6166666666666667,0.3818989071038251,-7.577650824830856,0.004767466337140692,Watermarked +2430,0.5666666666666667,0.4771857923497268,-2.244146678485596,0.11053578481406542,Watermarked +2431,0.6065573770491803,0.35833333333333334,-3.3233585459170785,0.044941262424214,Watermarked +2432,0.5245901639344263,0.4125000000000001,-6.066153107963813,0.008990727202934024,Watermarked 
+2433,0.6885245901639344,0.37916666666666665,-29.50232973023399,8.552802430562091e-05,Watermarked +2434,0.65,0.35628415300546445,-6.079870046364211,0.008933670268337511,Watermarked +2435,0.7333333333333333,0.42356557377049187,-9.685668717829044,0.002337018895349377,Watermarked +2436,0.639344262295082,0.38333333333333336,-11.898312771791307,0.001276673082635795,Watermarked +2437,0.65,0.45642076502732243,-6.157320526109763,0.008620267599043325,Watermarked +2438,0.6,0.35273224043715845,-11.787092032898832,0.0013125340009585288,Watermarked +2439,0.6557377049180327,0.48750000000000004,-4.044451296869408,0.027208830672879287,Watermarked +2440,0.6065573770491803,0.32916666666666666,-8.618610742527247,0.003284743377152693,Watermarked +2441,0.8360655737704918,0.3625,-21.607919158456742,0.00021691691845571816,Watermarked +2442,0.6065573770491803,0.425,-12.578664061524803,0.0010833606619678423,Watermarked +2443,0.5737704918032787,0.4166666666666667,-5.772363123361995,0.010336420243029285,Watermarked +2444,0.5166666666666667,0.43968579234972677,-2.8833841562530913,0.06335381086618001,Watermarked +2445,0.5833333333333334,0.4685109289617486,-3.74670122389729,0.03319137901399563,Watermarked +2446,0.6065573770491803,0.3916666666666667,-4.201654069631227,0.02460640768982449,Watermarked +2447,0.5333333333333333,0.4519808743169399,-3.088509743881098,0.05377632746313144,Watermarked +2448,0.6166666666666667,0.464412568306011,-3.9929846359991004,0.0281374053103564,Watermarked +2449,0.8688524590163934,0.4041666666666667,-111.52459016393443,1.5893997832869387e-06,Watermarked +2450,0.6557377049180327,0.3625,-10.053864168618263,0.002095156783314077,Watermarked +2451,0.639344262295082,0.46249999999999997,-4.30939544513269,0.023006171549745307,Watermarked +2452,0.6333333333333333,0.41878415300546445,-3.907188916118307,0.029777989336963344,Watermarked +2453,0.5333333333333333,0.4311475409836066,-2.3185802321951905,0.10321797628133385,Watermarked +2454,0.6,0.4395491803278689,-3.7611226340766057,0.03286477251791561,Watermarked +2455,0.5573770491803278,0.42500000000000004,-7.6310252969080015,0.004672019040052037,Watermarked +2456,0.48333333333333334,0.3772540983606557,-2.892862105157335,0.06286629800985993,Watermarked +2457,0.55,0.3484972677595628,-4.079918215864471,0.026591801968465816,Watermarked +2458,0.5666666666666667,0.35689890710382516,-3.012713968246357,0.057088449146294205,Watermarked +2459,0.5833333333333334,0.4519808743169399,-3.585650713444847,0.03713733022490143,Watermarked +2460,0.6,0.39829234972677596,-6.228521035735816,0.008344770553796356,Watermarked +2461,0.5833333333333334,0.36495901639344264,-11.467033890261218,0.001423486681886134,Watermarked +2462,0.6885245901639344,0.37083333333333335,-18.492347410557112,0.00034509721387531697,Watermarked +2463,0.639344262295082,0.35833333333333334,-7.031379503531193,0.005910122809393594,Watermarked +2464,0.6666666666666666,0.4273224043715847,-7.050049697276166,0.005865429795204114,Watermarked +2465,0.5333333333333333,0.36475409836065575,-3.2441722541392934,0.047699736310779436,Watermarked +2466,0.6229508196721312,0.40416666666666673,-4.882283495355984,0.016429048371143395,Watermarked +2467,0.48333333333333334,0.40286885245901644,-2.273916596472962,0.10753609772746466,Watermarked +2468,0.7166666666666667,0.43176229508196723,-4.998106522840042,0.01540842738464649,Watermarked +2469,0.6833333333333333,0.44036885245901636,-2.6121434779788526,0.07953784934392538,Watermarked +2470,0.5666666666666667,0.48524590163934433,-1.929654958496209,0.1492167828881105,Watermarked 
+2471,0.5333333333333333,0.4275273224043716,-3.5561117736258945,0.037925056755999366,Watermarked +2472,0.639344262295082,0.4625,-4.040627784984222,0.027276446171401562,Watermarked +2473,0.639344262295082,0.475,-6.831662693460199,0.006417517963111248,Watermarked +2474,0.7049180327868853,0.35833333333333334,-24.012092343181642,0.0001582981185228392,Watermarked +2475,0.7704918032786885,0.4041666666666667,-8.579921461021282,0.003327975019105188,Watermarked +2476,0.5666666666666667,0.3778688524590164,-5.101866069096539,0.014563191721761432,Watermarked +2477,0.7166666666666667,0.385724043715847,-7.664492373372008,0.004613454840878532,Watermarked +2478,0.48333333333333334,0.4031876138433515,-3.0047783082409976,0.09520452944787951,Watermarked +2479,0.6065573770491803,0.4375,-5.252662296910844,0.013439429896053543,Watermarked +2480,0.43333333333333335,0.35280054644808745,-2.803386864259781,0.06766254223000473,Watermarked +2481,0.7868852459016393,0.38333333333333336,-10.324506821548738,0.0019381469069513462,Watermarked +2482,0.6166666666666667,0.42288251366120216,-3.622802747830676,0.036175989328214676,Watermarked +2483,0.65,0.3319672131147541,-17.615818436997262,0.0003987923532231844,Watermarked +2484,0.6833333333333333,0.4146174863387978,-6.525168128577647,0.007313787597179912,Watermarked +2485,0.6,0.3689207650273224,-4.354954591021095,0.022370006686486543,Watermarked +2486,0.5737704918032787,0.3625,-4.5595686353563485,0.01977785439608666,Watermarked +2487,0.65,0.41475409836065574,-8.217472162729653,0.00377202241458133,Watermarked +2488,0.6333333333333333,0.4316256830601093,-6.118804569189945,0.008774285877481431,Watermarked +2489,0.5573770491803278,0.3958333333333333,-5.135277089276766,0.014303932115525365,Watermarked +2490,0.5666666666666667,0.4191256830601093,-3.801261952080425,0.03197715417981016,Watermarked +2491,0.6065573770491803,0.3666666666666667,-6.232543479694566,0.008329553737779013,Watermarked +2492,0.5901639344262295,0.4083333333333333,-8.2470608782833,0.0037329364018606546,Watermarked +2493,0.5666666666666667,0.41892076502732245,-5.9784922810858205,0.009366890251308508,Watermarked +2494,0.5666666666666667,0.4479508196721312,-2.7046377161768436,0.0734969637824313,Watermarked +2495,0.6833333333333333,0.38169398907103824,-8.926429305995807,0.002965896425199081,Watermarked +2496,0.6557377049180327,0.4208333333333333,-6.853539958645125,0.00635920568434066,Watermarked +2497,0.5833333333333334,0.4351775956284153,-3.62042968770528,0.03623643715847006,Watermarked +2498,0.7704918032786885,0.3666666666666667,-13.615808159190916,0.0008569802174432299,Watermarked +2499,0.6229508196721312,0.3833333333333333,-13.310561106566025,0.0009164879399678906,Watermarked +2500,0.7049180327868853,0.26666666666666666,-13.732158696046199,0.0008356507564616465,Watermarked +2501,0.5833333333333334,0.3980874316939891,-8.506737943489776,0.003411830999011348,Watermarked +2502,0.6885245901639344,0.44166666666666665,-5.438685446957326,0.01220408026603391,Watermarked +2503,0.48333333333333334,0.4064890710382514,-2.559704324213269,0.0832355641629743,Watermarked +2504,0.7,0.4148224043715847,-5.693637680357018,0.010741337381016519,Watermarked +2505,0.6166666666666667,0.4025273224043716,-8.890442589650126,0.0030009962178549105,Watermarked +2506,0.6557377049180327,0.42500000000000004,-6.851133053826303,0.006365586871051235,Watermarked +2507,0.6229508196721312,0.4791666666666667,-5.22212477804604,0.013657614104771973,Watermarked +2508,0.6166666666666667,0.3894808743169399,-4.915294791461667,0.01612940842830176,Watermarked 
+2509,0.8688524590163934,0.35,-25.41847552756969,0.0001335388647953177,Watermarked +2510,0.5666666666666667,0.37315573770491806,-3.2713284175710178,0.04673018421334356,Watermarked +2511,0.6333333333333333,0.43606557377049177,-5.107098757651382,0.014522185166915114,Watermarked +2512,0.6333333333333333,0.37315573770491806,-8.428620891983224,0.003504451310362902,Watermarked +2513,0.6,0.35273224043715845,-29.617790180570413,8.45343765080844e-05,Watermarked +2514,0.6557377049180327,0.44999999999999996,-4.3195919820040585,0.02286178010818133,Watermarked +2515,0.7377049180327869,0.3291666666666667,-15.836371260048626,0.000547399610373209,Watermarked +2516,0.5573770491803278,0.4333333333333333,-2.604375511062208,0.08007263680217473,Watermarked +2517,0.5409836065573771,0.42500000000000004,-4.639344262295081,0.01887310292166381,Watermarked +2518,0.6229508196721312,0.3958333333333333,-4.09708695591706,0.026299586172448756,Watermarked +2519,0.5409836065573771,0.3958333333333333,-3.537066558867117,0.03844426851884796,Watermarked +2520,0.6166666666666667,0.4230874316939891,-6.814259023242736,0.006464409944966311,Watermarked +2521,0.6721311475409836,0.3416666666666666,-30.71720234254342,7.580035597644182e-05,Watermarked +2522,0.7213114754098361,0.42916666666666664,-9.51209634369107,0.002463921302178941,Watermarked +2523,0.5833333333333334,0.39772313296903467,-11.857859091548148,0.00703694821787774,Watermarked +2524,0.7166666666666667,0.36461748633879776,-8.164949484411201,0.003842746397948636,Watermarked +2525,0.5409836065573771,0.5,-2.0077784776911325,0.1382803772888902,Watermarked +2526,0.6666666666666666,0.4025273224043716,-24.399378398771418,0.0001509087662519508,Watermarked +2527,0.6,0.40211748633879785,-3.4850713293771878,0.03990865507980857,Watermarked +2528,0.5409836065573771,0.4541666666666667,-2.176256072652707,0.11776205733944392,Watermarked +2529,0.5409836065573771,0.39166666666666666,-4.846843823072969,0.01675884522410491,Watermarked +2530,0.6333333333333333,0.4689890710382514,-2.3029347384916004,0.10470629811332137,Watermarked +2531,0.7377049180327869,0.3125,-40.55023240686837,3.3002033131333967e-05,Watermarked +2532,0.65,0.44330601092896177,-3.289205567147825,0.04610551507892302,Watermarked +2533,0.5333333333333333,0.3819672131147541,-2.224352555510678,0.11258619639823252,Watermarked +2534,0.5245901639344263,0.38333333333333336,-2.615568260013164,0.07930346961482511,Watermarked +2535,0.7666666666666667,0.3896857923497268,-13.06989618982596,0.0009673345468333082,Watermarked +2536,0.6229508196721312,0.44999999999999996,-2.9350727376464576,0.06075124544604779,Watermarked +2537,0.6557377049180327,0.38333333333333336,-4.592341249903026,0.0193995061866707,Watermarked +2538,0.6,0.3729508196721311,-5.094565492831203,0.014620656170024945,Watermarked +2539,0.7666666666666667,0.34446721311475414,-16.788036508322786,0.00046020497513463827,Watermarked +2540,0.5901639344262295,0.33333333333333337,-5.392319340084748,0.012497618930811919,Watermarked +2541,0.6065573770491803,0.41666666666666663,-12.480891064461987,0.0011086313738353398,Watermarked +2542,0.7213114754098361,0.45,-15.07113703400236,0.0006341498821599429,Watermarked +2543,0.7,0.38592896174863384,-7.972344974800964,0.004117606574879096,Watermarked +2544,0.5409836065573771,0.3791666666666667,-7.383406964717152,0.00513735130962263,Watermarked +2545,0.5081967213114754,0.3666666666666667,-3.4195951141068646,0.041855350747093845,Watermarked +2546,0.5901639344262295,0.48750000000000004,-2.6937228618928075,0.07417954696153008,Watermarked 
+2547,0.5245901639344263,0.3375,-8.980327868852457,0.0029143354515137163,Watermarked +2548,0.6333333333333333,0.3610655737704918,-6.352580462402448,0.007891754845695532,Watermarked +2549,0.45,0.3734972677595629,-4.334361862984616,0.022654697680077447,Watermarked +2550,0.6166666666666667,0.4599726775956284,-2.514156166273152,0.08662037793756441,Watermarked +2551,0.5166666666666667,0.360724043715847,-3.4306011747993215,0.041519810852172816,Watermarked +2552,0.6,0.44405737704918036,-3.2820666584674085,0.04635368887671906,Watermarked +2553,0.6666666666666666,0.4273224043715847,-6.912582855578067,0.006205274596049662,Watermarked +2554,0.5333333333333333,0.39849726775956285,-7.109187347867667,0.005726771511083016,Watermarked +2555,0.6885245901639344,0.3875,-7.23666133386256,0.005442256485075891,Watermarked +2556,0.5833333333333334,0.41523224043715845,-2.490304414512709,0.08846028307415578,Watermarked +2557,0.6833333333333333,0.4265710382513661,-3.6163076187603744,0.03634174375169587,Watermarked +2558,0.6885245901639344,0.35416666666666663,-13.436668405820718,0.000891257804744702,Watermarked +2559,0.55,0.3236338797814208,-3.600554934819319,0.03674779775320059,Watermarked +2560,0.4,0.2903688524590164,-7.987447429719284,0.004095133323667272,Watermarked +2561,0.6065573770491803,0.45833333333333337,-5.720862788355775,0.010598994876723512,Watermarked +2562,0.5573770491803278,0.3666666666666667,-5.605717509713631,0.01121846256816809,Watermarked +2563,0.55,0.3857923497267759,-2.773401460624423,0.06937128230425361,Watermarked +2564,0.5901639344262295,0.44583333333333336,-8.401275011504453,0.0035376591875335185,Watermarked +2565,0.6065573770491803,0.39583333333333337,-5.442971730211302,0.012177401351288936,Watermarked +2566,0.6229508196721312,0.41250000000000003,-4.643102959582609,0.018831812762783767,Watermarked +2567,0.5666666666666667,0.45614754098360655,-3.6102043743828824,0.03649838253815035,Watermarked +2568,0.65,0.4227459016393443,-5.2448185798443125,0.013495034751188689,Watermarked +2569,0.6229508196721312,0.425,-4.312990302477515,0.022955131152546198,Watermarked +2570,0.6229508196721312,0.4541666666666667,-6.130104668195612,0.008728726955771456,Watermarked +2571,0.6833333333333333,0.4685109289617486,-6.044103218158989,0.009083452194363606,Watermarked +2572,0.6666666666666666,0.551844262295082,-16.28766880858878,0.0005035373682223364,Watermarked +2573,0.6229508196721312,0.3416666666666667,-4.923541270396146,0.016055668617949045,Watermarked +2574,0.5,0.4148224043715847,-2.935331303657975,0.06073856617509204,Watermarked +2575,0.5666666666666667,0.39788251366120214,-3.995817683425296,0.028085239330913513,Watermarked +2576,0.5245901639344263,0.42083333333333334,-5.615165491162119,0.011165876393712151,Watermarked +2577,0.6166666666666667,0.44760928961748636,-2.8779672917258896,0.06363455777153253,Watermarked +2578,0.5737704918032787,0.4125,-4.800754235493788,0.017200753129888367,Watermarked +2579,0.5901639344262295,0.33749999999999997,-6.629461654119429,0.006991312298326812,Watermarked +2580,0.4918032786885246,0.4041666666666667,-10.98401471759805,0.0016157647819501492,Watermarked +2581,0.6,0.4559426229508197,-2.024940270686622,0.13600606005905447,Watermarked +2582,0.5166666666666667,0.36878415300546447,-2.8875369089248184,0.06313962819005703,Watermarked +2583,0.6,0.3898224043715847,-6.736784395106943,0.006678717888232159,Watermarked +2584,0.6721311475409836,0.4041666666666667,-15.59782388553334,0.0005726503128458851,Watermarked 
+2585,0.5409836065573771,0.35416666666666663,-10.11025517993969,0.002061088422236367,Watermarked +2586,0.5666666666666667,0.43135245901639346,-8.36023757475581,0.0035882787219289397,Watermarked +2587,0.5333333333333333,0.40683060109289615,-3.685355043522012,0.034628080098915076,Watermarked +2588,0.639344262295082,0.37083333333333335,-8.342713328952133,0.0036101866302316646,Watermarked +2589,0.6557377049180327,0.37916666666666665,-22.125683060109264,0.00020211389888430634,Watermarked +2590,0.5666666666666667,0.40204918032786885,-3.8720368025363827,0.030485627570044506,Watermarked +2591,0.5833333333333334,0.41509562841530057,-3.918936678185741,0.029546209898049555,Watermarked +2592,0.47540983606557374,0.37083333333333335,-2.2816691505216085,0.10677112400407492,Watermarked +2593,0.5666666666666667,0.4145491803278688,-3.225298064940852,0.04838862749274675,Watermarked +2594,0.6065573770491803,0.3833333333333333,-5.3934540699330284,0.012490325558165266,Watermarked +2595,0.65,0.3523224043715847,-6.072607039010736,0.008963822063323797,Watermarked +2596,0.6557377049180327,0.4125,-6.486338797814206,0.007438839601531942,Watermarked +2597,0.5,0.4363387978142077,-3.468352695270501,0.07401784897865982,Watermarked +2598,0.8333333333333334,0.4443306010928962,-13.069044138229902,0.0009675211431764483,Watermarked +2599,0.5737704918032787,0.41250000000000003,-3.6410524102519206,0.03571540330483068,Watermarked +2600,0.43333333333333335,0.194603825136612,-7.079275684498563,0.005796358119040252,Watermarked +2601,0.6721311475409836,0.2666666666666667,-13.671082075940005,0.0008467582451760971,Watermarked +2602,0.4426229508196721,0.3041666666666667,-3.470708937267535,0.04032563476307778,Watermarked +2603,0.4,0.2525273224043716,-2.709448750044224,0.07319856881186551,Watermarked +2604,0.4918032786885246,0.21666666666666665,-5.554402221351035,0.011509810078003106,Watermarked +2605,0.45,0.26939890710382514,-3.323461871958838,0.04493779701217889,Watermarked +2606,0.4918032786885246,0.24583333333333332,-7.477098647437464,0.004954385859733728,Watermarked +2607,0.5245901639344263,0.35416666666666663,-3.5645342704092955,0.03769829534729827,Watermarked +2608,0.38333333333333336,0.2655737704918033,-4.795356126981156,0.017253497348406005,Watermarked +2609,0.5737704918032787,0.2583333333333333,-8.132012676182216,0.0038879932294071462,Watermarked +2610,0.39344262295081966,0.21666666666666667,-9.819764009062963,0.0022448478763413245,Watermarked +2611,0.4426229508196721,0.26666666666666666,-2.7725030413659453,0.06942330291539806,Watermarked +2612,0.35,0.21147540983606558,-10.383401644446371,0.001906063349217342,Watermarked +2613,0.6885245901639344,0.19166666666666668,-22.535357185812448,0.00019134068124253585,Watermarked +2614,0.6557377049180327,0.20416666666666666,-12.92280294143108,0.0010002691021999664,Watermarked +2615,0.6229508196721312,0.25416666666666665,-11.210456983480942,0.001521591601458421,Watermarked +2616,0.5737704918032787,0.21666666666666667,-15.150632180505205,0.0006243223321137057,Watermarked +2617,0.5245901639344263,0.22499999999999998,-6.918738471764206,0.006189509895466707,Watermarked +2618,0.5166666666666667,0.2528688524590164,-8.617015849137902,0.0032865107935262,Watermarked +2619,0.4,0.23620218579234975,-5.140402367328491,0.014264695462063416,Watermarked +2620,0.5737704918032787,0.2791666666666667,-9.365095173805795,0.0025786301461165144,Watermarked +2621,0.5081967213114754,0.22916666666666663,-8.306260504867486,0.003656327731735171,Watermarked 
+2622,0.5409836065573771,0.2375,-7.720607509620097,0.004517414616084237,Watermarked +2623,0.7540983606557377,0.15833333333333333,-14.492905117377825,0.0007122141225715405,Watermarked +2624,0.4098360655737705,0.23333333333333334,-8.20310571032784,0.0037911953280657033,Watermarked +2625,0.4666666666666667,0.2568989071038251,-6.954617925271148,0.006098663645228381,Watermarked +2626,0.6721311475409836,0.22916666666666669,-17.17085230252723,0.00043034591455487535,Watermarked +2627,0.36666666666666664,0.2658469945355191,-2.8102104403444548,0.06728103964001445,Watermarked +2628,0.6333333333333333,0.5185109289617487,-7.578250897242925,0.004766379016909529,Watermarked +2629,0.75,0.19460382513661204,-10.85330590771855,0.0016736652012390987,Watermarked +2630,0.35,0.23203551912568304,-2.7592753489500113,0.07019486191792593,Watermarked +2631,0.48333333333333334,0.27404371584699455,-3.6739700755121674,0.03490341382115382,Watermarked +2632,0.4,0.2650273224043716,-2.3392495841230168,0.10129085666135956,Watermarked +2633,0.47540983606557374,0.2791666666666667,-4.2816691505216085,0.023404806469187092,Watermarked +2634,0.48333333333333334,0.24487704918032788,-16.399313267142833,0.0004934130615457107,Watermarked +2635,0.43333333333333335,0.25314207650273224,-4.511368522252938,0.02035188309309117,Watermarked +2636,0.6885245901639344,0.42500000000000004,-7.254802469447766,0.005403292565475987,Watermarked +2637,0.5833333333333334,0.2614071038251366,-6.303666365112512,0.008066415912528251,Watermarked +2638,0.45901639344262296,0.26249999999999996,-4.999367050456228,0.015397780557052671,Watermarked +2639,0.5,0.2284153005464481,-14.21620797979023,0.0007541140145429063,Watermarked +2640,0.6885245901639344,0.24166666666666667,-17.24695131862833,0.0004247197725637856,Watermarked +2641,0.43333333333333335,0.23210382513661199,-5.707259199146484,0.010669808386105378,Watermarked +2642,0.36666666666666664,0.24446721311475408,-3.2958071028221907,0.045877518615205004,Watermarked +2643,0.5737704918032787,0.2666666666666667,-11.653770786981173,0.0013573049832111713,Watermarked +2644,0.3770491803278688,0.24583333333333335,-4.076914857521508,0.02664334911823053,Watermarked +2645,0.4918032786885246,0.2333333333333333,-4.863758570369447,0.016600374637456124,Watermarked +2646,0.5901639344262295,0.2,-6.147724135202747,0.008658307683879244,Watermarked +2647,0.5409836065573771,0.23333333333333334,-17.08972839908021,0.00043645329683166667,Watermarked +2648,0.4426229508196721,0.2583333333333333,-6.667849243203331,0.006877333446979579,Watermarked +2649,0.47540983606557374,0.23333333333333334,-9.186157399702243,0.0027280043187776987,Watermarked +2650,0.5409836065573771,0.4375,-1.9737703484788938,0.14292112743640065,Watermarked +2651,0.5166666666666667,0.2614071038251366,-5.835105865583961,0.010027921838910365,Watermarked +2652,0.45,0.18640710382513662,-8.230490181795378,0.0037547595934809518,Watermarked +2653,0.5737704918032787,0.2583333333333333,-15.901213347642667,0.0005407927864783102,Watermarked +2654,0.5166666666666667,0.2823087431693989,-10.150428532833908,0.0020372645895073123,Watermarked +2655,0.5901639344262295,0.23750000000000002,-6.178438384650274,0.008537325917522763,Watermarked +2656,0.4426229508196721,0.27083333333333337,-3.3626410531598294,0.04364785672227368,Watermarked +2657,0.45,0.3445355191256831,-1.8802591489231106,0.15665493572520436,Watermarked +2658,0.47540983606557374,0.25416666666666665,-4.330653825520752,0.022706457712737285,Watermarked 
+2659,0.36065573770491804,0.30416666666666664,-3.0571045827459344,0.05511854521199261,Watermarked +2660,0.43333333333333335,0.24023224043715846,-3.433838233132664,0.0414217747379,Watermarked +2661,0.3770491803278688,0.24166666666666664,-2.7999079904008193,0.06785808116951951,Watermarked +2662,0.4666666666666667,0.2189207650273224,-2.8547072311071635,0.06485789803329635,Watermarked +2663,0.5409836065573771,0.4708333333333333,-2.930782830598391,0.06096209353124864,Watermarked +2664,0.5245901639344263,0.24166666666666664,-19.60151487691337,0.0002900998829147762,Watermarked +2665,0.45,0.27329234972677596,-2.6358380883941255,0.07793359764901263,Watermarked +2666,0.4666666666666667,0.2693306010928962,-5.264461390477094,0.013356347106673735,Watermarked +2667,0.6166666666666667,0.22418032786885245,-12.077933694787529,0.0012214555571584704,Watermarked +2668,0.48333333333333334,0.19467213114754098,-7.753219242030175,0.004462810515423161,Watermarked +2669,0.7049180327868853,0.4375,-4.557262825860137,0.01980483320019867,Watermarked +2670,0.8,0.3069672131147541,-32.249362363733205,6.552485880934655e-05,Watermarked +2671,0.4666666666666667,0.29836065573770487,-4.028754863050068,0.027487792515242023,Watermarked +2672,0.6229508196721312,0.21666666666666667,-24.377049180327866,0.0001513221713591045,Watermarked +2673,0.5,0.24870218579234973,-9.945483469303717,0.0021627595309605323,Watermarked +2674,0.48333333333333334,0.2364071038251366,-7.416787809974502,0.005071157125568877,Watermarked +2675,0.5573770491803278,0.25,-9.035003149610084,0.0028632366668939696,Watermarked +2676,0.48333333333333334,0.25724043715846995,-3.574953217198332,0.03742017108862267,Watermarked +2677,0.6,0.26967213114754096,-18.098951116688973,0.0003679240150022286,Watermarked +2678,0.45,0.273155737704918,-2.9827908163528103,0.05846639473064487,Watermarked +2679,0.3442622950819672,0.20416666666666666,-3.023496692892297,0.0566019195281106,Watermarked +2680,0.36666666666666664,0.25293715846994536,-3.3702819227387244,0.04340178717234347,Watermarked +2681,0.7377049180327869,0.19999999999999998,-17.24492210110332,0.00042486852229209587,Watermarked +2682,0.38333333333333336,0.2944672131147541,-2.8051586461529827,0.06756322441457491,Watermarked +2683,0.6557377049180327,0.14583333333333334,-17.025299295981913,0.00044138624174763324,Watermarked +2684,0.4426229508196721,0.275,-2.9340384355396165,0.060801997551779355,Watermarked +2685,0.4098360655737705,0.30833333333333335,-5.11676115950563,0.014446859559659811,Watermarked +2686,0.4166666666666667,0.2201502732240437,-3.2307527644980745,0.04818825006053326,Watermarked +2687,0.4262295081967213,0.2791666666666667,-10.333341919958322,0.0019332884778574828,Watermarked +2688,0.5,0.21577868852459017,-9.111666695534357,0.0027935663080181916,Watermarked +2689,0.4262295081967213,0.21250000000000002,-8.929327630907842,0.002963093223014625,Watermarked +2690,0.4918032786885246,0.4375,-2.2687169915537657,0.10805287324168507,Watermarked +2691,0.5737704918032787,0.2916666666666667,-6.359769644709016,0.007866504158921796,Watermarked +2692,0.39344262295081966,0.26666666666666666,-1.8012412407306406,0.1694691787567682,Watermarked +2693,0.6885245901639344,0.49166666666666675,-6.916091828453289,0.006196281549258644,Watermarked +2694,0.36666666666666664,0.2776639344262295,-1.7553554396997046,0.1774643858274755,Watermarked +2695,0.36065573770491804,0.2916666666666667,-1.899261403354651,0.15374343061615317,Watermarked +2696,0.4666666666666667,0.26584699453551913,-4.361077099679005,0.02228625751121354,Watermarked 
+2697,0.4262295081967213,0.27083333333333337,-3.735739626625215,0.03344241564305693,Watermarked +2698,0.47540983606557374,0.2375,-7.082180009413329,0.0057895528079337285,Watermarked +2699,0.4262295081967213,0.25416666666666665,-4.377269933993477,0.02206670433265803,Watermarked +2700,0.45901639344262296,0.24583333333333332,-5.194910461282009,0.013855966641037953,Watermarked +2701,0.5,0.28169398907103826,-5.477980736046312,0.011962325656887837,Watermarked +2702,0.55,0.2239754098360656,-10.180119908482158,0.0020198909534647554,Watermarked +2703,0.4918032786885246,0.2125,-4.076969624956957,0.0266424079833841,Watermarked +2704,0.55,0.3689890710382514,-4.353209549753575,0.022393951473013263,Watermarked +2705,0.4098360655737705,0.23333333333333334,-3.01552163240712,0.05696125979179388,Watermarked +2706,0.6557377049180327,0.20416666666666666,-18.1470510379736,0.0003650272002577954,Watermarked +2707,0.5737704918032787,0.2125,-8.356089793398965,0.003593448152596275,Watermarked +2708,0.6333333333333333,0.4937841530054645,-13.399578464312397,0.000898581498174531,Watermarked +2709,0.47540983606557374,0.22916666666666669,-5.559506115611259,0.01148039203424712,Watermarked +2710,0.55,0.2409153005464481,-5.275556807499205,0.013278830036287586,Watermarked +2711,0.39344262295081966,0.25,-4.714005617872331,0.018074445236289073,Watermarked +2712,0.5573770491803278,0.3041666666666667,-6.34727085863382,0.007910472276561024,Watermarked +2713,0.38333333333333336,0.2657103825136612,-3.2393534601694043,0.04787443221098791,Watermarked +2714,0.6166666666666667,0.4649590163934426,-6.9498273973418225,0.0061106911480797435,Watermarked +2715,0.8360655737704918,0.4375,-8.601708046331813,0.0033035386244482587,Watermarked +2716,0.5833333333333334,0.2448087431693989,-8.253674606606968,0.0037242731002333166,Watermarked +2717,0.35,0.24057377049180328,-5.639493439517238,0.011031947405375732,Watermarked +2718,0.5833333333333334,0.21530054644808744,-9.600064444773704,0.002398495294738282,Watermarked +2719,0.65,0.26140710382513666,-8.577234238877079,0.003331005620411765,Watermarked +2720,0.7704918032786885,0.43333333333333335,-18.728885297038175,0.00033227343416865454,Watermarked +2721,0.43333333333333335,0.27827868852459015,-3.3172720549176096,0.04514599422781829,Watermarked +2722,0.5245901639344263,0.2375,-10.122383386195422,0.0020538573428606206,Watermarked +2723,0.45901639344262296,0.3,-3.5639741636483593,0.037713321483800306,Watermarked +2724,0.36666666666666664,0.24480874316939888,-2.9309984722316638,0.06095147293868425,Watermarked +2725,0.4166666666666667,0.31912568306010924,-2.530970883389505,0.08535147636509026,Watermarked +2726,0.5166666666666667,0.24453551912568305,-6.751934642062348,0.006636082160327463,Watermarked +2727,0.3333333333333333,0.24890710382513662,-2.3788710766712677,0.14042438082786876,Watermarked +2728,0.5666666666666667,0.4769125683060109,-3.385054287067082,0.04293102665536962,Watermarked +2729,0.6166666666666667,0.24904371584699453,-16.87653237880174,0.00045306323873643635,Watermarked +2730,0.4426229508196721,0.25416666666666665,-4.94476472119911,0.01586789705539504,Watermarked +2731,0.39344262295081966,0.17916666666666667,-7.782330369943198,0.004414801539065831,Watermarked +2732,0.4666666666666667,0.29829234972677593,-3.6409046416348776,0.035719102507377344,Watermarked +2733,0.6065573770491803,0.18333333333333335,-31.10048861943559,7.303903972764834e-05,Watermarked +2734,0.5737704918032787,0.2,-9.42090076205074,0.0025342671020132644,Watermarked 
+2735,0.5245901639344263,0.22499999999999998,-5.482448602285279,0.011935236042507276,Watermarked +2736,0.6666666666666666,0.5806010928961749,-3.3948578066343797,0.04262218843494368,Watermarked +2737,0.5737704918032787,0.29166666666666663,-8.21042730655783,0.0037814081546606237,Watermarked +2738,0.32786885245901637,0.1958333333333333,-5.118158442680822,0.014436008902853572,Watermarked +2739,0.55,0.23210382513661199,-12.123135549004308,0.0012080613029538494,Watermarked +2740,0.7166666666666667,0.2116120218579235,-23.085068346256453,0.00017805380137088774,Watermarked +2741,0.4262295081967213,0.3,-2.9706771945521644,0.05903607413760807,Watermarked +2742,0.4262295081967213,0.23333333333333334,-2.8349832104998747,0.0659183394825102,Watermarked +2743,0.5,0.2941256830601093,-5.153821208882444,0.014162630156722995,Watermarked +2744,0.4262295081967213,0.24583333333333335,-2.8527267194908648,0.06496341216084892,Watermarked +2745,0.4166666666666667,0.2069672131147541,-3.211725828402651,0.048891793073391886,Watermarked +2746,0.4,0.2360655737704918,-3.510803607398461,0.03917521433599371,Watermarked +2747,0.38333333333333336,0.3026639344262295,-2.8694731663260846,0.06407793061054502,Watermarked +2748,0.5901639344262295,0.21666666666666667,-7.61224281479834,0.004705316919230158,Watermarked +2749,0.4666666666666667,0.2573087431693989,-5.082825548341707,0.014713685416969534,Watermarked +2750,0.48333333333333334,0.22862021857923498,-3.065403117058221,0.05475981670342542,Watermarked +2751,0.43333333333333335,0.26509562841530054,-2.866308813315449,0.06424408813835332,Watermarked +2752,0.45,0.30273224043715846,-5.035807306036759,0.015094095073797963,Watermarked +2753,0.65,0.3404371584699453,-6.119691979185821,0.008770696803910683,Watermarked +2754,0.5409836065573771,0.20416666666666666,-7.431074551672135,0.005043170304209105,Watermarked +2755,0.5081967213114754,0.25,-6.605727113313177,0.007063029498989654,Watermarked +2756,0.4166666666666667,0.2698087431693989,-9.036379043539897,0.0028619661292264213,Watermarked +2757,0.5081967213114754,0.2791666666666667,-7.116030982441239,0.005711005406184151,Watermarked +2758,0.45,0.28620218579234974,-3.720511834007275,0.033795204522967935,Watermarked +2759,0.4,0.2201502732240437,-5.295186309462342,0.013143123507478411,Watermarked +2760,0.6666666666666666,0.28217213114754097,-16.292961239551758,0.0005030512239375957,Watermarked +2761,0.47540983606557374,0.275,-3.8509508464272586,0.03092048438850596,Watermarked +2762,0.3770491803278688,0.24166666666666664,-6.140373651674497,0.008687594169327536,Watermarked +2763,0.6666666666666666,0.17397540983606558,-18.4422633103658,0.0003478966667273698,Watermarked +2764,0.65,0.4228142076502732,-4.415045976602256,0.02156528848562188,Watermarked +2765,0.7049180327868853,0.19999999999999998,-8.806809104164614,0.003084718357015228,Watermarked +2766,0.45,0.20314207650273225,-9.794313683048983,0.002261966026846209,Watermarked +2767,0.48333333333333334,0.2532103825136612,-5.1768582175628834,0.013989618848905717,Watermarked +2768,0.5833333333333334,0.2275273224043716,-5.726067308431419,0.010572065566569503,Watermarked +2769,0.639344262295082,0.1708333333333333,-12.10156988574371,0.0012144271996402047,Watermarked +2770,0.4098360655737705,0.23333333333333334,-5.951158369984666,0.00948839549552589,Watermarked +2771,0.47540983606557374,0.20416666666666666,-4.968520257961496,0.015661100563564314,Watermarked +2772,0.5333333333333333,0.2527322404371585,-4.948933058520406,0.01583135472899473,Watermarked 
+2773,0.47540983606557374,0.25,-4.9384820758662515,0.015923182580957953,Watermarked +2774,0.5833333333333334,0.2739071038251366,-13.930528616417067,0.0008008837712144284,Watermarked +2775,0.6229508196721312,0.15833333333333333,-7.384810731767565,0.005134544759237181,Watermarked +2776,0.38333333333333336,0.30273224043715846,-4.0696430409706865,0.026768690892997455,Watermarked +2777,0.48333333333333334,0.3855191256830601,-2.636692552502379,0.07787649597226969,Watermarked +2778,0.5901639344262295,0.22083333333333333,-8.148412107258412,0.003865377073276304,Watermarked +2779,0.45,0.298224043715847,-2.2807020090731873,0.10686619566510568,Watermarked +2780,0.4262295081967213,0.18333333333333332,-4.17817014100824,0.02497413039088068,Watermarked +2781,0.5333333333333333,0.27322404371584696,-5.040542454345847,0.015055208208642631,Watermarked +2782,0.4166666666666667,0.21530054644808744,-4.283002068724982,0.02338543904381472,Watermarked +2783,0.5081967213114754,0.19583333333333333,-7.410760977286471,0.005083024752688326,Watermarked +2784,0.3442622950819672,0.27499999999999997,-4.798632565231811,0.017221458650986843,Watermarked +2785,0.43333333333333335,0.2864071038251366,-4.100734975708682,0.02623802994170771,Watermarked +2786,0.47540983606557374,0.31666666666666665,-4.974054989029201,0.015613424561556911,Watermarked +2787,0.5,0.19863387978142077,-6.300594478470427,0.00807755369838508,Watermarked +2788,0.35,0.244603825136612,-2.2254146723514387,0.11247501852404906,Watermarked +2789,0.6229508196721312,0.2041666666666667,-13.312635870763579,0.0009160652152262239,Watermarked +2790,0.6833333333333333,0.4937158469945355,-8.885516696979227,0.003005843404969766,Watermarked +2791,0.45901639344262296,0.24166666666666664,-13.62086314551715,0.0008560385924757967,Watermarked +2792,0.4,0.26154371584699454,-3.4496293922245655,0.040947730245819165,Watermarked +2793,0.5166666666666667,0.29890710382513663,-3.8375524808634864,0.031200942303648546,Watermarked +2794,0.48333333333333334,0.2693306010928962,-6.246793566459261,0.008275938325843453,Watermarked +2795,0.4666666666666667,0.2655054644808743,-9.468017765185019,0.002497594914513529,Watermarked +2796,0.5166666666666667,0.19501366120218577,-6.412883435087076,0.007683202378919043,Watermarked +2797,0.4666666666666667,0.30683060109289617,-2.6055084852992985,0.07999436123101161,Watermarked +2798,0.5081967213114754,0.19999999999999998,-6.752251856457131,0.006635193291969306,Watermarked +2799,0.6229508196721312,0.20833333333333334,-19.77027395258561,0.0002827789468772716,Watermarked +2800,0.6557377049180327,0.4208333333333334,-10.236282712367263,0.0019875612473827316,Watermarked +2801,0.7049180327868853,0.4541666666666667,-4.489735890023653,0.020616535562147837,Watermarked +2802,0.6166666666666667,0.36038251366120216,-4.0670388186695,0.02681376369186907,Watermarked +2803,0.6833333333333333,0.4560792349726776,-5.9382601309311465,0.009546449129946996,Watermarked +2804,0.5245901639344263,0.425,-2.2720572719623497,0.10772054769411174,Watermarked +2805,0.7049180327868853,0.45,-3.6218924086243263,0.036199162722362586,Watermarked +2806,0.5666666666666667,0.38545081967213113,-5.020124422686412,0.015223827432133234,Watermarked +2807,0.5833333333333334,0.4937158469945355,-3.4194195863846306,0.041860729913222816,Watermarked +2808,0.5737704918032787,0.5,-2.260714065293725,0.10885413598684734,Watermarked +2809,0.7,0.3735655737704918,-10.454060628342173,0.0018684937780359754,Watermarked +2810,0.639344262295082,0.4,-9.756144412236056,0.0022879647959347044,Watermarked 
+2811,0.7377049180327869,0.39583333333333337,-7.4590163934426235,0.004989020225851709,Watermarked +2812,0.6333333333333333,0.3608606557377049,-12.95979435165334,0.0009918481137665518,Watermarked +2813,0.6065573770491803,0.37083333333333335,-10.272001058557553,0.001967356422200276,Watermarked +2814,0.7,0.435655737704918,-6.464633818447615,0.00750996829937376,Watermarked +2815,0.7166666666666667,0.45211748633879784,-15.725247417039386,0.0005589740150536343,Watermarked +2816,0.5901639344262295,0.4125,-3.7159734017373043,0.033901269284630456,Watermarked +2817,0.55,0.40191256830601096,-2.7474782254251675,0.07089198741296619,Watermarked +2818,0.6,0.45642076502732243,-10.118093819799428,0.0020564110187835205,Watermarked +2819,0.5166666666666667,0.39822404371584696,-2.815880143112684,0.06696608581805782,Watermarked +2820,0.639344262295082,0.48333333333333334,-5.404377109955353,0.012420402409580856,Watermarked +2821,0.5573770491803278,0.4083333333333333,-6.759987541853552,0.006613565506268803,Watermarked +2822,0.6721311475409836,0.4916666666666667,-6.965216353380622,0.006072164935164103,Watermarked +2823,0.5833333333333334,0.3817622950819672,-6.45646486744287,0.007536970061691775,Watermarked +2824,0.6229508196721312,0.425,-18.39984547099852,0.0003502913010235412,Watermarked +2825,0.6229508196721312,0.3583333333333334,-6.037002655406131,0.009113578699967435,Watermarked +2826,0.6065573770491803,0.37083333333333335,-8.311288381099306,0.003649917396478642,Watermarked +2827,0.6229508196721312,0.3833333333333333,-5.370455913263936,0.012639237313128257,Watermarked +2828,0.5166666666666667,0.44371584699453553,-2.834044674981603,0.06596933591930301,Watermarked +2829,0.6333333333333333,0.5101092896174864,-3.547608678177134,0.038155756341829127,Watermarked +2830,0.55,0.43558743169398906,-3.3200966453052976,0.045050836488169606,Watermarked +2831,0.6166666666666667,0.4814890710382514,-8.921688593482292,0.0029704891181838475,Watermarked +2832,0.6885245901639344,0.36249999999999993,-10.885700407196474,0.0016590620208720404,Watermarked +2833,0.5901639344262295,0.4625,-3.8807823860207193,0.030307571526173836,Watermarked +2834,0.6065573770491803,0.4041666666666667,-3.796834767979554,0.032073534237681285,Watermarked +2835,0.5573770491803278,0.35,-6.095615458270268,0.008868760073876481,Watermarked +2836,0.5833333333333334,0.4852459016393443,-3.2682313237305527,0.04683949096861523,Watermarked +2837,0.5081967213114754,0.4291666666666667,-3.443843878513792,0.041120604000996194,Watermarked +2838,0.5833333333333334,0.4435109289617486,-3.1918220004780653,0.04964171462252754,Watermarked +2839,0.6885245901639344,0.45833333333333337,-13.269636352208451,0.000924879437843544,Watermarked +2840,0.5833333333333334,0.3773224043715847,-4.587084180430928,0.01945956097411938,Watermarked +2841,0.6229508196721312,0.4666666666666667,-5.570797323763848,0.011415660012176308,Watermarked +2842,0.7049180327868853,0.4541666666666666,-3.965300941783783,0.028653776872905008,Watermarked +2843,0.7868852459016393,0.32916666666666666,-12.411848878312394,0.0011269497196247178,Watermarked +2844,0.7213114754098361,0.4291666666666667,-8.523579646154461,0.0033922878667983014,Watermarked +2845,0.5666666666666667,0.47704918032786886,-5.705858082917919,0.01067713705598062,Watermarked +2846,0.5573770491803278,0.36250000000000004,-4.348783893927762,0.022454827486827797,Watermarked +2847,0.6065573770491803,0.4708333333333333,-3.0287534515367915,0.05636661686037541,Watermarked 
+2848,0.5166666666666667,0.4523224043715847,-4.428901327226881,0.02138508188391737,Watermarked +2849,0.639344262295082,0.37916666666666665,-5.497766397039839,0.011842967860121692,Watermarked +2850,0.65,0.3694672131147541,-7.27109198627894,0.005368618417249257,Watermarked +2851,0.7049180327868853,0.35833333333333334,-8.672148663324567,0.003226136549403669,Watermarked +2852,0.5901639344262295,0.3625,-4.761760610530732,0.01758652689930633,Watermarked +2853,0.45,0.3647540983606557,-7.531019558041822,0.017178611116784117,Watermarked +2854,0.5081967213114754,0.4083333333333333,-3.1652931763476935,0.05066398901162397,Watermarked +2855,0.6065573770491803,0.3625,-6.508196721311476,0.007368102187221459,Watermarked +2856,0.5166666666666667,0.4562841530054645,-2.5241076101780897,0.08586662930511714,Watermarked +2857,0.7049180327868853,0.42916666666666664,-5.888028951637414,0.00977702506049218,Watermarked +2858,0.6,0.4227459016393443,-4.090863905829874,0.02640502342776308,Watermarked +2859,0.6333333333333333,0.38504098360655736,-3.331454908237819,0.044670745752969254,Watermarked +2860,0.5166666666666667,0.4189207650273224,-2.2659856668962592,0.10832553396730833,Watermarked +2861,0.6557377049180327,0.4125,-8.339578454332552,0.0036141243204565873,Watermarked +2862,0.5737704918032787,0.4083333333333333,-2.501174736689476,0.08761587052799776,Watermarked +2863,0.7166666666666667,0.40669398907103826,-7.514384861135337,0.004883963889775299,Watermarked +2864,0.6333333333333333,0.3781420765027323,-5.634742497670123,0.011057936145282518,Watermarked +2865,0.5,0.3776639344262295,-4.858609077032151,0.01664841190941072,Watermarked +2866,0.5573770491803278,0.4625,-2.1172312753822844,0.12450912024159524,Watermarked +2867,0.6557377049180327,0.43750000000000006,-9.957781870946098,0.002154944280259222,Watermarked +2868,0.6557377049180327,0.37916666666666665,-7.6307221538215995,0.004672553983451911,Watermarked +2869,0.6,0.4523224043715847,-6.124375580236181,0.00875178616778322,Watermarked +2870,0.5,0.34795081967213115,-2.822931024840667,0.06657696557341944,Watermarked +2871,0.7704918032786885,0.3208333333333333,-14.29409263370358,0.0007419958228994995,Watermarked +2872,0.6557377049180327,0.3416666666666667,-11.034070169059069,0.0015942956657695156,Watermarked +2873,0.55,0.4352459016393443,-2.8995067504928964,0.06252731441759059,Watermarked +2874,0.6,0.3688524590163934,-4.890622801652107,0.016352674036444928,Watermarked +2875,0.6065573770491803,0.36666666666666664,-4.753997634988252,0.01766466674069792,Watermarked +2876,0.5409836065573771,0.4375,-12.970212234731923,0.0009894935210338785,Watermarked +2877,0.5245901639344263,0.32916666666666666,-5.592518411047395,0.011292471025751814,Watermarked +2878,0.4918032786885246,0.39583333333333337,-4.8738225751540805,0.016507011615195296,Watermarked +2879,0.5081967213114754,0.43333333333333335,-3.1761845581166224,0.050241108410324004,Watermarked +2880,0.6557377049180327,0.43749999999999994,-3.491803278688525,0.039715098203703336,Watermarked +2881,0.5081967213114754,0.375,-3.0387597258755705,0.05592210337011148,Watermarked +2882,0.7377049180327869,0.4458333333333333,-8.515608096333422,0.0034015195584640207,Watermarked +2883,0.6666666666666666,0.45170765027322407,-4.285643375079597,0.02334712177086421,Watermarked +2884,0.48333333333333334,0.4147540983606558,-2.7228776982622653,0.07237357811230395,Watermarked +2885,0.65,0.46509562841530055,-5.141289506242194,0.014257918214039442,Watermarked +2886,0.47540983606557374,0.4208333333333334,-2.4902246368970027,0.08846651702809234,Watermarked 
+2887,0.5737704918032787,0.43333333333333335,-2.8622478469687973,0.06445811513039201,Watermarked +2888,0.6333333333333333,0.45157103825136613,-2.989953698734684,0.05813278125064246,Watermarked +2889,0.5573770491803278,0.3833333333333333,-3.057286949843683,0.05511063014547841,Watermarked +2890,0.6065573770491803,0.4166666666666667,-10.548278185933164,0.0018199105302834477,Watermarked +2891,0.6166666666666667,0.49337431693989064,-2.7692008371142,0.0696149257313345,Watermarked +2892,0.5333333333333333,0.3858606557377049,-3.299959459423778,0.0457348408632345,Watermarked +2893,0.5901639344262295,0.4625,-3.800342933561155,0.031997130689119066,Watermarked +2894,0.4666666666666667,0.3866120218579235,-1.7856892936312567,0.21607017204333234,Watermarked +2895,0.5737704918032787,0.4916666666666667,-3.723879479976154,0.0337167759198274,Watermarked +2896,0.5333333333333333,0.43948087431693994,-2.3754283562972685,0.09802174273100749,Watermarked +2897,0.5333333333333333,0.37725409836065577,-3.8749319798464548,0.03042653470881461,Watermarked +2898,0.639344262295082,0.4,-13.295383727881227,0.0009195881666894832,Watermarked +2899,0.5666666666666667,0.4316939890710383,-6.669993903142867,0.006871037958981735,Watermarked +2900,0.7049180327868853,0.4916666666666667,-9.672172825695142,0.0023465711838438533,Watermarked +2901,0.7049180327868853,0.4666666666666667,-9.041003671038775,0.0028577010889647217,Watermarked +2902,0.5666666666666667,0.4603825136612022,-3.7003174802745145,0.03427044013621927,Watermarked +2903,0.7666666666666667,0.3653005464480874,-8.367710625283678,0.0035789897552198605,Watermarked +2904,0.5245901639344263,0.3666666666666667,-11.604959601054722,0.0013742030324602752,Watermarked +2905,0.65,0.4771174863387978,-4.574500631486023,0.019604293549578128,Watermarked +2906,0.5409836065573771,0.39999999999999997,-4.521534453338067,0.020229034371625897,Watermarked +2907,0.5833333333333334,0.4359972677595628,-3.71598604315986,0.03390097325869757,Watermarked +2908,0.6065573770491803,0.37083333333333335,-4.12972900744298,0.025755332738221934,Watermarked +2909,0.7377049180327869,0.35416666666666663,-11.65895831862298,0.0013555253490377533,Watermarked +2910,0.6557377049180327,0.37916666666666665,-8.407323638843176,0.0035302782268764117,Watermarked +2911,0.5901639344262295,0.32499999999999996,-8.404697750055918,0.003533480017692563,Watermarked +2912,0.6333333333333333,0.47704918032786886,-3.572872262389389,0.037475510464252,Watermarked +2913,0.5409836065573771,0.4041666666666667,-3.3807925592909616,0.04306616942393178,Watermarked +2914,0.55,0.43558743169398906,-4.55116556463313,0.01987640371413144,Watermarked +2915,0.6885245901639344,0.5,-3.265341686400343,0.04694176800449449,Watermarked +2916,0.5333333333333333,0.42711748633879776,-3.8576108132230886,0.030782280046241853,Watermarked +2917,0.55,0.4313524590163934,-3.8365066246858697,0.03122297154382187,Watermarked +2918,0.6885245901639344,0.3458333333333333,-11.442187082108672,0.0014326132345143508,Watermarked +2919,0.5833333333333334,0.4420765027322404,-4.718756309978549,0.04209478615042156,Watermarked +2920,0.65,0.45266393442622954,-5.549004690060408,0.011541028000440172,Watermarked +2921,0.6065573770491803,0.4666666666666667,-4.2869837090014355,0.02332770866411145,Watermarked +2922,0.5573770491803278,0.4291666666666667,-3.7406497535262835,0.03332966691975382,Watermarked +2923,0.7333333333333333,0.5189890710382514,-7.795621704107285,0.004393108665011521,Watermarked 
+2924,0.6166666666666667,0.3902322404371585,-7.19301070709552,0.005537540195779914,Watermarked +2925,0.5833333333333334,0.360724043715847,-7.579568529572813,0.004763992639250445,Watermarked +2926,0.7333333333333333,0.3858606557377049,-14.089106567555575,0.0007744612617815288,Watermarked +2927,0.5573770491803278,0.4041666666666667,-2.527395473158105,0.08561936927249675,Watermarked +2928,0.7377049180327869,0.3875,-12.007025761124119,0.001242867214816079,Watermarked +2929,0.5333333333333333,0.4230874316939891,-3.347260874883563,0.04414856066329664,Watermarked +2930,0.7540983606557377,0.35000000000000003,-16.471846559014978,0.00048698021922821526,Watermarked +2931,0.7049180327868853,0.38749999999999996,-39.783878716039034,3.4943182204626624e-05,Watermarked +2932,0.7049180327868853,0.3833333333333333,-27.287366605133304,0.0001080165038157327,Watermarked +2933,0.5833333333333334,0.3605874316939891,-4.472129156189748,0.020835236225830767,Watermarked +2934,0.5409836065573771,0.42083333333333334,-6.50235424552611,0.007386922898311131,Watermarked +2935,0.7,0.36502732240437163,-7.572061952950856,0.004777609010065385,Watermarked +2936,0.6721311475409836,0.425,-4.356743170503809,0.022345498848743366,Watermarked +2937,0.5333333333333333,0.4153688524590164,-3.1703304743958,0.05046784910179293,Watermarked +2938,0.48333333333333334,0.3284153005464481,-3.071040072795025,0.05451781694215867,Watermarked +2939,0.5901639344262295,0.4375,-5.097326714872012,0.014598887274167346,Watermarked +2940,0.43333333333333335,0.39781420765027325,-16.249999999999922,0.0037656052779771634,Watermarked +2941,0.5,0.35947176684881604,-3.9829601606847738,0.05763976433444822,Watermarked +2942,0.5,0.4313524590163934,-1.9917554271299593,0.14044440711390466,Watermarked +2943,0.5666666666666667,0.41905737704918034,-7.182256405781872,0.005561352568453646,Watermarked +2944,0.5166666666666667,0.3982923497267759,-5.599507245339936,0.011253204715190618,Watermarked +2945,0.5333333333333333,0.39829234972677596,-4.723689577243244,0.017974094449026225,Watermarked +2946,0.5901639344262295,0.38333333333333336,-4.2565386541927595,0.02377386602678355,Watermarked +2947,0.6229508196721312,0.42499999999999993,-3.2030019695384855,0.04921870916506516,Watermarked +2948,0.45901639344262296,0.3875,-1.7671941136183904,0.1753608911595616,Watermarked +2949,0.6721311475409836,0.42500000000000004,-5.444705021331377,0.01216663456000868,Watermarked +2950,0.5573770491803278,0.3375,-4.229541871678541,0.024178754668284275,Watermarked +2951,0.65,0.48087431693989074,-2.870168301345979,0.06404150157384371,Watermarked +2952,0.639344262295082,0.3958333333333333,-8.130635229762294,0.003889900792911817,Watermarked +2953,0.5901639344262295,0.4541666666666667,-4.048412402739574,0.02713900943382794,Watermarked +2954,0.5666666666666667,0.4519808743169399,-2.7715039029613733,0.0694812123937475,Watermarked +2955,0.6333333333333333,0.4562841530054645,-6.639086391609726,0.006962502866864333,Watermarked +2956,0.6,0.3402322404371585,-5.379181269835038,0.012582469390043725,Watermarked +2957,0.6557377049180327,0.4125,-3.2184309034451113,0.04864239224720965,Watermarked +2958,0.5833333333333334,0.3690573770491803,-6.0781372785179055,0.008940851611301535,Watermarked +2959,0.6229508196721312,0.37916666666666665,-11.123418935025548,0.0015569047785033382,Watermarked +2960,0.6557377049180327,0.38749999999999996,-8.52694846917119,0.0033883964608289563,Watermarked +2961,0.6166666666666667,0.4227459016393443,-3.8587298371444883,0.030759136480267034,Watermarked 
+2962,0.5666666666666667,0.4936475409836066,-2.5087837781411735,0.08703067245521756,Watermarked +2963,0.7377049180327869,0.39583333333333337,-21.672082554883993,0.00021500571596787181,Watermarked +2964,0.5666666666666667,0.4441256830601093,-4.114953099885994,0.025999880249744926,Watermarked +2965,0.7,0.3152322404371585,-27.094781124739157,0.00011032862635579921,Watermarked +2966,0.819672131147541,0.375,-9.79685729377205,0.002260247404737733,Watermarked +2967,0.4918032786885246,0.325,-4.952772918130208,0.015797788968726655,Watermarked +2968,0.6557377049180327,0.4083333333333333,-7.3460047131317,0.00521288063379473,Watermarked +2969,0.45,0.3919854280510018,-1.2592474439830368,0.33499662016654086,Watermarked +2970,0.639344262295082,0.41250000000000003,-6.491699980107792,0.00742140717977802,Watermarked +2971,0.6557377049180327,0.39583333333333337,-16.47597884542129,0.0004866171022863723,Watermarked +2972,0.6833333333333333,0.4605874316939891,-9.319488874116459,0.002615651895897316,Watermarked +2973,0.6666666666666666,0.37725409836065577,-9.746825292040107,0.0022943725355199732,Watermarked +2974,0.6166666666666667,0.4519125683060109,-4.467667765204196,0.020891129917487965,Watermarked +2975,0.639344262295082,0.3833333333333333,-3.8203179931250597,0.03156651788894358,Watermarked +2976,0.7540983606557377,0.41250000000000003,-6.306431273644387,0.008056408388901619,Watermarked +2977,0.7166666666666667,0.4853142076502732,-5.664156791225322,0.010898310561203092,Watermarked +2978,0.6166666666666667,0.35239071038251363,-10.603077839669773,0.001792421776010322,Watermarked +2979,0.5333333333333333,0.42288251366120216,-2.792068808043707,0.06830128777723389,Watermarked +2980,0.5573770491803278,0.3666666666666667,-6.797930999977556,0.00650881472279479,Watermarked +2981,0.5573770491803278,0.4416666666666667,-2.8952739132187233,0.06274299207471501,Watermarked +2982,0.55,0.4230191256830601,-3.5968393291702223,0.03684441767090311,Watermarked +2983,0.5833333333333334,0.36495901639344264,-8.629044905647506,0.003273211490827028,Watermarked +2984,0.5245901639344263,0.4041666666666667,-2.9757050833898977,0.05879878128106479,Watermarked +2985,0.7377049180327869,0.4666666666666667,-15.055959655317555,0.0006360495232655051,Watermarked +2986,0.5737704918032787,0.42083333333333334,-3.1359127797177497,0.05182736123156866,Watermarked +2987,0.7377049180327869,0.36666666666666664,-12.510329824209249,0.0011009408982364437,Watermarked +2988,0.65,0.4400273224043716,-4.731321598151042,0.017895516622468247,Watermarked +2989,0.6721311475409836,0.41666666666666663,-8.004718441897252,0.004069631315123374,Watermarked +2990,0.6885245901639344,0.37083333333333335,-10.34387657107953,0.0019275165468191863,Watermarked +2991,0.55,0.4271174863387978,-4.102891976310035,0.026201720340923294,Watermarked +2992,0.5166666666666667,0.4565573770491803,-3.871284033511624,0.030501016317830993,Watermarked +2993,0.6229508196721312,0.375,-7.362230013436795,0.005179936955568627,Watermarked +2994,0.5081967213114754,0.4041666666666667,-5.629951970401516,0.01108422263908312,Watermarked +2995,0.6666666666666666,0.38989071038251366,-7.027749672525287,0.005918864007946058,Watermarked +2996,0.6,0.4684426229508197,-3.0715294425556663,0.05449687138415423,Watermarked +2997,0.55,0.3942622950819672,-5.155433917254585,0.01415042791070789,Watermarked +2998,0.5166666666666667,0.4093806921675774,-2.1811187398383804,0.16093929016287928,Watermarked +2999,0.5666666666666667,0.4518442622950819,-2.7116536407871275,0.07306231680161718,Watermarked 
+3000,0.5333333333333333,0.38995901639344266,-4.717870559277373,0.01803430740365783,Watermarked +3001,0.5333333333333333,0.33572404371584696,-5.92922902548398,0.009587374714266027,Watermarked +3002,0.7540983606557377,0.31666666666666665,-8.09966109145276,0.0039331240116504485,Watermarked +3003,0.5,0.3855191256830601,-2.7763365971020724,0.06920166849483687,Watermarked +3004,0.7049180327868853,0.35833333333333334,-13.376800824017138,0.0009031187936782386,Watermarked +3005,0.4666666666666667,0.3734972677595629,-2.323557226050761,0.10274990807505914,Watermarked +3006,0.6166666666666667,0.4319672131147541,-3.959212335659781,0.028768976615567175,Watermarked +3007,0.5333333333333333,0.3937841530054645,-3.3540166875307085,0.04392773109730455,Watermarked +3008,0.6666666666666666,0.3860655737704918,-6.951550947054901,0.006106360263638599,Watermarked +3009,0.7704918032786885,0.3875,-11.40102880068347,0.0014479028367797866,Watermarked +3010,0.6333333333333333,0.43572404371584705,-11.348123445291295,0.0014678760475146785,Watermarked +3011,0.7213114754098361,0.2875,-27.500378882331333,0.00010553371875927762,Watermarked +3012,0.5333333333333333,0.30683060109289617,-10.923824320280426,0.0016420917950501913,Watermarked +3013,0.5409836065573771,0.3125,-3.893753295658278,0.03004594161105454,Watermarked +3014,0.6666666666666666,0.3523907103825137,-6.8536427559842705,0.006358933335598355,Watermarked +3015,0.6166666666666667,0.4937158469945355,-4.691276871564279,0.018312849057309264,Watermarked +3016,0.6557377049180327,0.33333333333333337,-5.788824671837737,0.010254290938715958,Watermarked +3017,0.55,0.3273224043715847,-5.436414841751896,0.012218244055546734,Watermarked +3018,0.4918032786885246,0.3333333333333333,-3.361655189247521,0.043679734758324804,Watermarked +3019,0.5245901639344263,0.37916666666666665,-2.9532447616676007,0.059868156846439195,Watermarked +3020,0.5737704918032787,0.43333333333333335,-4.127992550132961,0.025783917599408773,Watermarked +3021,0.5166666666666667,0.3692622950819672,-8.104386708891786,0.003926488707999252,Watermarked +3022,0.5245901639344263,0.3666666666666667,-4.309973795297764,0.022997950137582556,Watermarked +3023,0.5166666666666667,0.3651639344262295,-3.2122265752036507,0.04887311160481489,Watermarked +3024,0.5666666666666667,0.39002732240437155,-3.9886831470208737,0.02821684888437735,Watermarked +3025,0.5901639344262295,0.3458333333333333,-4.2504183551249355,0.02386488383717604,Watermarked +3026,0.6229508196721312,0.32499999999999996,-6.880879437718984,0.006287310969453765,Watermarked +3027,0.5666666666666667,0.4148907103825137,-6.273699658520553,0.008175934849579894,Watermarked +3028,0.5081967213114754,0.375,-2.754702614168314,0.07046406236659389,Watermarked +3029,0.4918032786885246,0.3958333333333333,-6.7433095204558455,0.006660310850275737,Watermarked +3030,0.5737704918032787,0.38749999999999996,-4.020027050959187,0.02764449995869553,Watermarked +3031,0.55,0.4400273224043716,-2.9245926757956453,0.061267955766122074,Watermarked +3032,0.6885245901639344,0.375,-7.524590163934425,0.004864919741574715,Watermarked +3033,0.7333333333333333,0.28647540983606556,-23.29092393225503,0.00017339475386319076,Watermarked +3034,0.5081967213114754,0.3458333333333333,-3.5424739195231005,0.038295935976556825,Watermarked +3035,0.55,0.36099726775956287,-6.160098807321899,0.008609295466603118,Watermarked +3036,0.5,0.4400956284153006,-2.282897070641292,0.10665056460060143,Watermarked +3037,0.55,0.41994535519125686,-18.636835301251896,0.0028667184146571527,Watermarked 
+3038,0.5833333333333334,0.4271174863387978,-5.673540790894663,0.010848020755982598,Watermarked +3039,0.5333333333333333,0.44405737704918036,-3.2047054428325206,0.04915465698343356,Watermarked +3040,0.7377049180327869,0.3625,-11.657726867446977,0.0013559475300634065,Watermarked +3041,0.5333333333333333,0.36919398907103823,-6.079597077972364,0.008934801065512471,Watermarked +3042,0.7704918032786885,0.3416666666666666,-39.859982700803556,3.47437157745963e-05,Watermarked +3043,0.6065573770491803,0.33749999999999997,-9.2248243559719,0.0026947741822498272,Watermarked +3044,0.7,0.4937158469945355,-8.461896046771958,0.0034645967635944996,Watermarked +3045,0.6333333333333333,0.33551912568306014,-6.233747163298384,0.008325007314823952,Watermarked +3046,0.5409836065573771,0.36249999999999993,-12.541115855481232,0.0010929740797637178,Watermarked +3047,0.5901639344262295,0.39166666666666666,-3.4744562728924717,0.040216306514409425,Watermarked +3048,0.5166666666666667,0.40204918032786885,-2.291468930292345,0.10581353287285375,Watermarked +3049,0.6229508196721312,0.42500000000000004,-3.9774734463808428,0.028425239599828966,Watermarked +3050,0.5833333333333334,0.41898907103825134,-7.392993166333208,0.005118225808922169,Watermarked +3051,0.48333333333333334,0.42732240437158475,-2.616902813780725,0.07921236839961415,Watermarked +3052,0.5666666666666667,0.323224043715847,-6.786089292025601,0.006541270274250083,Watermarked +3053,0.6,0.3608606557377049,-6.358986733603966,0.007869248834482912,Watermarked +3054,0.7,0.32786885245901637,-15.380260404150008,0.0005970483655978608,Watermarked +3055,0.6,0.44849726775956283,-4.876103332248839,0.016485948236315353,Watermarked +3056,0.4166666666666667,0.3813296903460838,-3.099243212836256,0.09023900265155212,Watermarked +3057,0.5333333333333333,0.3650956284153006,-4.977491357685013,0.015583918885552142,Watermarked +3058,0.55,0.38545081967213113,-2.575853873389877,0.08207463601397176,Watermarked +3059,0.639344262295082,0.4,-5.945876360497398,0.009512113138117317,Watermarked +3060,0.6833333333333333,0.37739071038251365,-5.80205523533122,0.010188900334164743,Watermarked +3061,0.45901639344262296,0.375,-2.8708139379727218,0.06400768969988004,Watermarked +3062,0.6,0.4312158469945355,-3.2417038925561115,0.04778912095848229,Watermarked +3063,0.6,0.36933060109289617,-6.847157062045659,0.006376146501783941,Watermarked +3064,0.5166666666666667,0.36912568306010923,-7.1062884927858425,0.005733467077920565,Watermarked +3065,0.639344262295082,0.4,-11.123716102549825,0.0015567823708010134,Watermarked +3066,0.6557377049180327,0.4166666666666667,-2.859337167599795,0.06461206589437947,Watermarked +3067,0.6166666666666667,0.41475409836065574,-8.016759438310023,0.004051975840158878,Watermarked +3068,0.5409836065573771,0.37083333333333335,-3.8415339067926904,0.03111726238521636,Watermarked +3069,0.6666666666666666,0.35273224043715845,-8.96185148037276,0.0029318759099350045,Watermarked +3070,0.6833333333333333,0.3729508196721312,-6.271729950256445,0.008183201757294712,Watermarked +3071,0.5081967213114754,0.3416666666666667,-3.845846875093736,0.031026938964603432,Watermarked +3072,0.48333333333333334,0.3443306010928962,-6.819969086862238,0.006448975326342902,Watermarked +3073,0.5245901639344263,0.4083333333333333,-2.753689634807033,0.0705238707366333,Watermarked +3074,0.5166666666666667,0.3471766848816029,-2.090326535903569,0.1717476912950698,Watermarked +3075,0.639344262295082,0.33333333333333337,-6.424891128611615,0.007642542451366619,Watermarked 
+3076,0.5,0.37745901639344265,-5.307134884333113,0.013061404240156596,Watermarked +3077,0.5166666666666667,0.38558743169398907,-3.5605909993218026,0.037804245228875336,Watermarked +3078,0.7704918032786885,0.30833333333333335,-13.194558267956465,0.0009405413630643912,Watermarked +3079,0.7540983606557377,0.3625,-11.206538990050577,0.001523158238254348,Watermarked +3080,0.47540983606557374,0.3416666666666666,-2.7938036914857083,0.06820289228132356,Watermarked +3081,0.5666666666666667,0.3652322404371584,-6.1214946474517395,0.008763411957595306,Watermarked +3082,0.6721311475409836,0.38333333333333336,-8.021244634914645,0.004045425154564468,Watermarked +3083,0.5,0.423224043715847,-3.689180746726386,0.03453618347086469,Watermarked +3084,0.5737704918032787,0.42500000000000004,-3.2776582466721216,0.04650778639185666,Watermarked +3085,0.5081967213114754,0.3666666666666667,-2.5040961733848097,0.08739061882540976,Watermarked +3086,0.7868852459016393,0.3666666666666667,-10.75090825425996,0.0017209617612538612,Watermarked +3087,0.5409836065573771,0.45833333333333337,-9.918032786885249,0.002180339575209168,Watermarked +3088,0.7540983606557377,0.3083333333333333,-27.93518318279224,0.0001006969515148238,Watermarked +3089,0.5333333333333333,0.3562841530054645,-3.0165050287504305,0.05691679534461103,Watermarked +3090,0.5245901639344263,0.4,-2.792392128219332,0.06828293698877178,Watermarked +3091,0.5,0.3187841530054645,-3.0923105332523986,0.053616690038148854,Watermarked +3092,0.6065573770491803,0.35833333333333334,-5.034902560136769,0.015101540087103106,Watermarked +3093,0.7377049180327869,0.3416666666666667,-6.611700289223251,0.0070448899240306145,Watermarked +3094,0.48333333333333334,0.3692622950819672,-3.8314981973665483,0.03132874291483428,Watermarked +3095,0.5901639344262295,0.35,-4.446971954645207,0.021152969420364667,Watermarked +3096,0.5666666666666667,0.4068306010928962,-5.623397158172275,0.011120322780836269,Watermarked +3097,0.5833333333333334,0.37684426229508194,-3.3673616030988653,0.04349562581061357,Watermarked +3098,0.4918032786885246,0.36250000000000004,-3.567536931880534,0.03761787225198254,Watermarked +3099,0.6833333333333333,0.33599726775956285,-19.586542860762346,0.0002907615152797266,Watermarked +3100,0.75,0.3402322404371585,-98.69856142441458,2.2928614096026574e-06,Watermarked +3101,0.43333333333333335,0.34763205828779603,-2.164602989862948,0.16283491133527353,Watermarked +3102,0.7540983606557377,0.4083333333333333,-13.34516471643176,0.0009094713400266177,Watermarked +3103,0.5833333333333334,0.3486338797814208,-4.5541089442634854,0.019841812100798096,Watermarked +3104,0.6721311475409836,0.325,-10.30712201881151,0.0019477541958685205,Watermarked +3105,0.4426229508196721,0.3416666666666666,-9.384048173269779,0.0025634488134986455,Watermarked +3106,0.5333333333333333,0.3570355191256831,-5.484975321134636,0.011919951523226141,Watermarked +3107,0.6,0.4395491803278689,-6.233752412361234,0.00832498749577228,Watermarked +3108,0.5166666666666667,0.33613387978142073,-5.360731285926958,0.012702901609225676,Watermarked +3109,0.6166666666666667,0.4563524590163935,-3.2403169660324136,0.04783943753394908,Watermarked +3110,0.7049180327868853,0.3875,-20.121911614894287,0.0002682965121865791,Watermarked +3111,0.5901639344262295,0.4083333333333333,-9.166095697580069,0.002745459142872881,Watermarked +3112,0.6721311475409836,0.35833333333333334,-5.742446164819555,0.010487900478810956,Watermarked +3113,0.6721311475409836,0.3666666666666667,-7.815029537231881,0.004361685111408271,Watermarked 
+3114,0.6229508196721312,0.44999999999999996,-3.1053610315875337,0.05307307725713686,Watermarked +3115,0.55,0.36454918032786887,-3.2455626872934524,0.04764947889276525,Watermarked +3116,0.5333333333333333,0.37356557377049177,-3.757035055298464,0.032956925528189414,Watermarked +3117,0.639344262295082,0.39999999999999997,-5.493611809608042,0.011867900939411315,Watermarked +3118,0.5833333333333334,0.3897540983606558,-7.467934583305662,0.004971898730909863,Watermarked +3119,0.5,0.42342896174863387,-4.800324631602227,0.01720494304786845,Watermarked +3120,0.5737704918032787,0.33749999999999997,-14.977768942526218,0.0006459569455958266,Watermarked +3121,0.5166666666666667,0.29849726775956287,-4.379387604175447,0.022038198857139507,Watermarked +3122,0.4918032786885246,0.39166666666666666,-3.42163677485204,0.041792847188906634,Watermarked +3123,0.7213114754098361,0.35833333333333334,-16.463141062057648,0.0004877463789961118,Watermarked +3124,0.6833333333333333,0.3688524590163934,-7.8974364585461565,0.00423151561991115,Watermarked +3125,0.5901639344262295,0.37083333333333335,-3.8425267583017693,0.03109643993412762,Watermarked +3126,0.6065573770491803,0.3916666666666667,-4.682079879777176,0.01841047771126608,Watermarked +3127,0.6333333333333333,0.4646174863387978,-5.6726426012368485,0.010852821078198268,Watermarked +3128,0.4426229508196721,0.31666666666666665,-3.324802624172649,0.04489286075866494,Watermarked +3129,0.5409836065573771,0.31666666666666665,-5.419860209810303,0.012322162365242495,Watermarked +3130,0.5833333333333334,0.36045081967213116,-4.440891852865153,0.02123070084724967,Watermarked +3131,0.6166666666666667,0.3941256830601093,-3.223851145752677,0.04844195656899026,Watermarked +3132,0.6065573770491803,0.45833333333333337,-6.722810708442813,0.00671836273975336,Watermarked +3133,0.55,0.41461748633879786,-4.609601498177781,0.01920401476029972,Watermarked +3134,0.45,0.3482923497267759,-3.935890584039202,0.029215786175393728,Watermarked +3135,0.6721311475409836,0.35,-14.97130283665096,0.0006467854175943383,Watermarked +3136,0.7540983606557377,0.3541666666666667,-16.708601239682135,0.00046674362746457226,Watermarked +3137,0.5409836065573771,0.37916666666666665,-2.2740071535885997,0.10752712396614864,Watermarked +3138,0.6333333333333333,0.43907103825136606,-2.820281518337947,0.06672285377972131,Watermarked +3139,0.6885245901639344,0.3625,-3.419275812902317,0.04186513660401589,Watermarked +3140,0.6885245901639344,0.35833333333333334,-6.456069857786826,0.007538278964860644,Watermarked +3141,0.5081967213114754,0.4291666666666667,-1.8998904386836808,0.15364813434857005,Watermarked +3142,0.5901639344262295,0.44999999999999996,-3.585966147227154,0.03712903167578977,Watermarked +3143,0.6065573770491803,0.4291666666666667,-4.10299965843394,0.02619990938534542,Watermarked +3144,0.5737704918032787,0.37916666666666665,-5.466397186258622,0.01203293462437204,Watermarked +3145,0.48333333333333334,0.3855191256830601,-3.236738871470941,0.047969557784140165,Watermarked +3146,0.6666666666666666,0.38982240437158466,-7.304853191112693,0.005297682541612248,Watermarked +3147,0.8360655737704918,0.34583333333333327,-17.804840632796406,0.00038631936941714036,Watermarked +3148,0.5901639344262295,0.3708333333333333,-5.272729644436734,0.01329852584746764,Watermarked +3149,0.4666666666666667,0.3483606557377049,-4.11223200027819,0.026045241625233456,Watermarked +3150,0.4918032786885246,0.38750000000000007,-4.357648863022137,0.022333101933145045,Watermarked 
+3151,0.639344262295082,0.32916666666666666,-6.767511177347243,0.006592619506710128,Watermarked +3152,0.5333333333333333,0.4186475409836065,-2.434445731879166,0.0929600077713279,Watermarked +3153,0.5409836065573771,0.36666666666666664,-5.1238506750677635,0.0143919153374869,Watermarked +3154,0.5666666666666667,0.41072404371584703,-10.34274767901109,0.0019281339723313756,Watermarked +3155,0.48333333333333334,0.35280054644808745,-9.808946709611767,0.0022521026539224693,Watermarked +3156,0.5333333333333333,0.43531420765027323,-2.293238524273031,0.10564172479014122,Watermarked +3157,0.5333333333333333,0.35273224043715845,-8.189147517586767,0.0038099469501537706,Watermarked +3158,0.6333333333333333,0.36898907103825135,-9.328181302832693,0.0026085416180980188,Watermarked +3159,0.5901639344262295,0.375,-3.3240845596280026,0.04491692010565002,Watermarked +3160,0.5333333333333333,0.38162568306010936,-5.53218920098128,0.011638997397509718,Watermarked +3161,0.5166666666666667,0.4438524590163935,-4.609409203433426,0.01920617859490499,Watermarked +3162,0.5573770491803278,0.35416666666666663,-3.463054126930755,0.040550141822148895,Watermarked +3163,0.55,0.3030054644808743,-3.9665722337998846,0.028629798248878405,Watermarked +3164,0.6333333333333333,0.37336065573770494,-8.550441690064975,0.003361422362271298,Watermarked +3165,0.4426229508196721,0.375,-4.685055463096139,0.01837881723814625,Watermarked +3166,0.6166666666666667,0.31127049180327865,-9.651006540622726,0.002361656973312143,Watermarked +3167,0.7166666666666667,0.38599726775956283,-15.465401308907632,0.0005873388348340652,Watermarked +3168,0.6333333333333333,0.2568306010928962,-10.419231568279397,0.0018868885475834876,Watermarked +3169,0.8032786885245902,0.37916666666666665,-8.376268479684917,0.0035683913912364033,Watermarked +3170,0.5166666666666667,0.3978825136612022,-2.035753205981991,0.1345958069792505,Watermarked +3171,0.6,0.3980874316939891,-7.600738322488857,0.004725867063471558,Watermarked +3172,0.7868852459016393,0.39166666666666666,-27.381546373206323,0.00010690928977807902,Watermarked +3173,0.38333333333333336,0.3278005464480874,-5.343021351720979,0.01281992383399354,Watermarked +3174,0.5833333333333334,0.41857923497267757,-3.3484587546107205,0.0441093025667818,Watermarked +3175,0.5166666666666667,0.33989071038251367,-4.377554579883222,0.02206287000445764,Watermarked +3176,0.5,0.39207650273224043,-6.083165423794337,0.025975157859805906,Watermarked +3177,0.7704918032786885,0.3208333333333333,-15.854306407643215,0.0005455614883016697,Watermarked +3178,0.5,0.28189890710382515,-8.369667988980005,0.003576562016483774,Watermarked +3179,0.5833333333333334,0.3903005464480874,-6.562635068330882,0.0071957310516421215,Watermarked +3180,0.6666666666666666,0.3730191256830601,-6.867274869817217,0.006322952397605397,Watermarked +3181,0.43333333333333335,0.32568306010928966,-4.191489361702128,0.052479362028526676,Watermarked +3182,0.6721311475409836,0.35000000000000003,-6.5653506061794005,0.0071872722304183664,Watermarked +3183,0.7,0.3524590163934426,-10.447778102751764,0.0018717942236663328,Watermarked +3184,0.6557377049180327,0.36249999999999993,-70.37704918032789,6.3221094755592405e-06,Watermarked +3185,0.5166666666666667,0.45621584699453555,-2.481511075867372,0.0891506613942868,Watermarked +3186,0.5666666666666667,0.4271174863387978,-3.4482809420111318,0.040987939768970645,Watermarked +3187,0.5901639344262295,0.3666666666666667,-23.226517386743268,0.000174834804970807,Watermarked 
+3188,0.4918032786885246,0.39583333333333337,-7.677595628415292,0.004590789402025528,Watermarked +3189,0.5166666666666667,0.3941256830601093,-3.7613782647305922,0.03285902039196703,Watermarked +3190,0.6229508196721312,0.3416666666666666,-13.4125186386734,0.0008960173086668277,Watermarked +3191,0.5333333333333333,0.3273224043715847,-3.665214293431724,0.0351170707372584,Watermarked +3192,0.6333333333333333,0.52698087431694,-2.6670409685182177,0.07588150981935955,Watermarked +3193,0.6557377049180327,0.37916666666666665,-22.125683060109264,0.00020211389888430634,Watermarked +3194,0.5166666666666667,0.3484289617486339,-2.8306761052244935,0.06615277677623232,Watermarked +3195,0.48333333333333334,0.3771857923497267,-2.618745744139447,0.07908677590411582,Watermarked +3196,0.5166666666666667,0.3983606557377049,-3.9828547448399347,0.02832495388510772,Watermarked +3197,0.45901639344262296,0.3708333333333333,-2.3515482695810577,0.10016489140788615,Watermarked +3198,0.5166666666666667,0.3691256830601093,-7.521674420991118,0.004870350805489641,Watermarked +3199,0.6,0.3650956284153005,-41.98171043120708,2.974431577617351e-05,Watermarked +3200,0.5333333333333333,0.3980191256830601,-3.2824011075555344,0.04634202450941883,Watermarked +3201,0.39344262295081966,0.35416666666666663,-3.142076502732241,0.051580525827867194,Watermarked +3202,0.6065573770491803,0.4041666666666667,-11.78086978660089,0.0013145795310678767,Watermarked +3203,0.639344262295082,0.38333333333333336,-8.021840834286337,0.004044555451710541,Watermarked +3204,0.6666666666666666,0.332103825136612,-15.850883671738368,0.0005459116401288157,Watermarked +3205,0.48333333333333334,0.37329234972677594,-3.6587848750156016,0.03527502614042613,Watermarked +3206,0.36666666666666664,0.29863387978142075,-2.4701740765596973,0.09005051324793042,Watermarked +3207,0.7213114754098361,0.3666666666666667,-8.810214059275827,0.0030812498509113165,Watermarked +3208,0.4666666666666667,0.4105874316939891,-1.5057124818059264,0.22921714041813998,Watermarked +3209,0.5409836065573771,0.22083333333333333,-6.909375426419404,0.0062135098686104855,Watermarked +3210,0.5333333333333333,0.41509562841530057,-6.219125997443245,0.00838045426704938,Watermarked +3211,0.55,0.38995901639344266,-5.020631036144959,0.015219613947787137,Watermarked +3212,0.5245901639344263,0.3041666666666667,-5.783528726099863,0.010280619668641678,Watermarked +3213,0.5166666666666667,0.34378415300546444,-3.106070198750089,0.05304373654077345,Watermarked +3214,0.5,0.34433060109289615,-4.489204827567853,0.020623088486061487,Watermarked +3215,0.45901639344262296,0.29166666666666663,-15.55542491509537,0.000577299561388648,Watermarked +3216,0.5333333333333333,0.3945355191256831,-2.804910878658928,0.0675771021782272,Watermarked +3217,0.5833333333333334,0.3105191256830601,-4.772347925976209,0.017480680770223765,Watermarked +3218,0.7377049180327869,0.3375,-19.209836065573764,0.0003080910681863436,Watermarked +3219,0.5737704918032787,0.3833333333333333,-4.318708814548055,0.022874240057608075,Watermarked +3220,0.55,0.3239754098360656,-5.08778567271395,0.014674286692950959,Watermarked +3221,0.5333333333333333,0.3812158469945355,-2.9098120190283097,0.06200608865891807,Watermarked +3222,0.35,0.2569672131147541,-3.7827690734466466,0.03238222739381364,Watermarked +3223,0.7377049180327869,0.2875,-16.3510804808083,0.0004977534397666609,Watermarked +3224,0.4426229508196721,0.41250000000000003,-1.9095681132275377,0.15219065910307153,Watermarked 
+3225,0.5573770491803278,0.4041666666666667,-4.560818191534335,0.019763254051599725,Watermarked +3226,0.7377049180327869,0.35416666666666663,-13.929800773742766,0.0008010077875496463,Watermarked +3227,0.639344262295082,0.4083333333333333,-7.498639437160465,0.004913539903160214,Watermarked +3228,0.43333333333333335,0.36919398907103823,-2.8757941403931446,0.06374762528542408,Watermarked +3229,0.4666666666666667,0.28169398907103826,-3.4441072444884826,0.04111271435550431,Watermarked +3230,0.7166666666666667,0.36133879781420764,-10.501771543064775,0.0018436795460142676,Watermarked +3231,0.6557377049180327,0.32916666666666666,-20.7021431957843,0.00024648403432591493,Watermarked +3232,0.6666666666666666,0.3896857923497268,-7.435895472354905,0.005033772413915748,Watermarked +3233,0.6721311475409836,0.3541666666666667,-5.824341929189313,0.01007998096990893,Watermarked +3234,0.5333333333333333,0.37752732240437153,-6.922734884968511,0.0061793032033680305,Watermarked +3235,0.7377049180327869,0.41666666666666663,-9.080332982450201,0.002821767606719206,Watermarked +3236,0.55,0.38155737704918036,-3.5810708052340003,0.03725808735441134,Watermarked +3237,0.5245901639344263,0.37916666666666665,-4.242857392252246,0.02397794903308461,Watermarked +3238,0.6557377049180327,0.3625,-6.133284818784408,0.008715961470550591,Watermarked +3239,0.5,0.29494535519125686,-3.242117432574401,0.04777413102072636,Watermarked +3240,0.5081967213114754,0.38749999999999996,-2.474836034730356,0.08967913937363099,Watermarked +3241,0.5,0.37363387978142076,-3.3164206595199035,0.04517472690275175,Watermarked +3242,0.6666666666666666,0.4066256830601093,-18.30718715197799,0.0003555990078420392,Watermarked +3243,0.6721311475409836,0.3375,-11.473067915690864,0.0014212819594170815,Watermarked +3244,0.55,0.31058743169398906,-4.4611858896576715,0.020972683531277587,Watermarked +3245,0.5166666666666667,0.4523224043715847,-3.6924428701682177,0.034458070848123835,Watermarked +3246,0.8524590163934426,0.3541666666666667,-20.02461614309747,0.00027220292398855736,Watermarked +3247,0.6885245901639344,0.3416666666666667,-15.731996671646355,0.0005582617929085287,Watermarked +3248,0.5833333333333334,0.4019808743169399,-3.3737593587415304,0.04329038177941591,Watermarked +3249,0.4666666666666667,0.36092896174863387,-4.924844065432519,0.016044059231596925,Watermarked +3250,0.5901639344262295,0.3666666666666667,-4.511917593907792,0.020345223247711267,Watermarked +3251,0.45,0.37759562841530053,-3.483025090320139,0.039967726509492464,Watermarked +3252,0.6885245901639344,0.35833333333333334,-14.9760677256279,0.0006461747777244088,Watermarked +3253,0.47540983606557374,0.35,-4.022062624190254,0.027607848701928988,Watermarked +3254,0.6166666666666667,0.47691256830601103,-3.022304945858794,0.0566554359290112,Watermarked +3255,0.4666666666666667,0.3237704918032787,-4.585573326772214,0.019476865011623285,Watermarked +3256,0.7377049180327869,0.37916666666666665,-6.896820529493841,0.006245884135338378,Watermarked +3257,0.5245901639344263,0.38749999999999996,-2.4730386527896364,0.0898220971946934,Watermarked +3258,0.48333333333333334,0.373155737704918,-3.7564520938598576,0.03297009515071729,Watermarked +3259,0.43333333333333335,0.3278005464480874,-5.076865217882905,0.014761211511120415,Watermarked +3260,0.48333333333333334,0.31086065573770494,-2.446306172576996,0.0919815932829521,Watermarked +3261,0.6065573770491803,0.35833333333333334,-7.2243808310002775,0.005468843291212087,Watermarked 
+3262,0.5166666666666667,0.35252732240437157,-4.378773516969542,0.022046460036578352,Watermarked +3263,0.48333333333333334,0.37329234972677594,-2.5720050130375935,0.08234949684139341,Watermarked +3264,0.6721311475409836,0.36666666666666664,-5.484666087385246,0.011921820751493945,Watermarked +3265,0.65,0.3280054644808743,-14.159199715025988,0.0007631512846613169,Watermarked +3266,0.7213114754098361,0.35416666666666663,-6.623110492647877,0.007010408974038976,Watermarked +3267,0.5409836065573771,0.35833333333333334,-4.326302046694481,0.022767398907893536,Watermarked +3268,0.65,0.332103825136612,-10.547062196561422,0.0018205268185872753,Watermarked +3269,0.5737704918032787,0.32916666666666666,-4.41253184635759,0.021598198496940495,Watermarked +3270,0.7213114754098361,0.29166666666666663,-26.924962031836227,0.00011242244042797529,Watermarked +3271,0.5333333333333333,0.3239071038251366,-6.101922495764141,0.008842933069258837,Watermarked +3272,0.6666666666666666,0.36448087431693993,-5.276046667403815,0.013275421234137245,Watermarked +3273,0.48333333333333334,0.3691256830601093,-2.611497078469241,0.07958218244247245,Watermarked +3274,0.6333333333333333,0.3605191256830601,-7.086477957097316,0.0057795013156923015,Watermarked +3275,0.6666666666666666,0.3151639344262295,-6.220646342838296,0.008374666202957234,Watermarked +3276,0.6557377049180327,0.4125,-4.719504703575856,0.01801737147532206,Watermarked +3277,0.5245901639344263,0.30000000000000004,-4.334146739196724,0.022657696393709254,Watermarked +3278,0.5409836065573771,0.4,-2.990713926985726,0.05809751373410958,Watermarked +3279,0.5081967213114754,0.30416666666666664,-5.440801457194902,0.012190900140945336,Watermarked +3280,0.55,0.3399590163934426,-4.334577337946331,0.022651694585833295,Watermarked +3281,0.4666666666666667,0.3108606557377049,-2.3956053387350558,0.0962542828406371,Watermarked +3282,0.5166666666666667,0.4145491803278689,-2.833861411958958,0.0659792994727473,Watermarked +3283,0.5737704918032787,0.22916666666666669,-8.901067221690772,0.0029905765083464166,Watermarked +3284,0.4426229508196721,0.33749999999999997,-2.4314657198035565,0.09320783628411884,Watermarked +3285,0.5,0.27329234972677596,-4.855988855019134,0.016672924204011234,Watermarked +3286,0.6,0.25341530054644806,-13.539511357945873,0.0008713612072207959,Watermarked +3287,0.7049180327868853,0.3208333333333333,-11.206006160936766,0.0015233714589831572,Watermarked +3288,0.639344262295082,0.3416666666666667,-3.5780996247983943,0.037336695763226686,Watermarked +3289,0.639344262295082,0.29583333333333334,-13.315684846477943,0.0009154444688870522,Watermarked +3290,0.6557377049180327,0.3875,-11.688828790772387,0.0013453380246460723,Watermarked +3291,0.5666666666666667,0.3316939890710382,-5.9679306695524,0.009413595633304362,Watermarked +3292,0.5737704918032787,0.4,-3.3534274040500196,0.04394693731715465,Watermarked +3293,0.5666666666666667,0.3896857923497268,-3.9226085660313683,0.029474240628051502,Watermarked +3294,0.5409836065573771,0.3708333333333333,-2.9809200108781417,0.058553923379340976,Watermarked +3295,0.48333333333333334,0.3403005464480874,-2.952162851632012,0.05992028137117902,Watermarked +3296,0.7166666666666667,0.2653688524590164,-16.038628368958843,0.0005271380369548524,Watermarked +3297,0.4262295081967213,0.3083333333333333,-3.8269224330393166,0.03142577924163457,Watermarked +3298,0.5666666666666667,0.4144125683060109,-3.4211013109598394,0.041809228433188725,Watermarked +3299,0.639344262295082,0.32916666666666666,-4.849227812278989,0.016736391339350645,Watermarked 
+3300,0.5,0.3152322404371585,-17.69183502777838,0.00039371274155216776,Watermarked +3301,0.5901639344262295,0.35833333333333334,-11.68659984883048,0.001346094689122379,Watermarked +3302,0.5409836065573771,0.4,-2.873384998124319,0.06387326471495713,Watermarked +3303,0.48333333333333334,0.4063524590163934,-1.9862461743452617,0.14119767735260078,Watermarked +3304,0.5666666666666667,0.31898907103825136,-5.827473612902438,0.01006479830694792,Watermarked +3305,0.5166666666666667,0.3939207650273224,-3.0276117316398676,0.05641761781506673,Watermarked +3306,0.48333333333333334,0.3358606557377049,-4.734425236222974,0.017863689846178848,Watermarked +3307,0.6166666666666667,0.3656420765027322,-5.128769703376435,0.014353952607178531,Watermarked +3308,0.48333333333333334,0.3566939890710382,-7.883059676687519,0.004253852045425947,Watermarked +3309,0.6721311475409836,0.3416666666666667,-9.434713700912411,0.0025234427155974874,Watermarked +3310,0.6065573770491803,0.3041666666666667,-10.096584056706222,0.0020692799372390666,Watermarked +3311,0.4918032786885246,0.43333333333333335,-3.508196721311475,0.0392487316994922,Watermarked +3312,0.5409836065573771,0.35,-4.813749731486455,0.01707463310186129,Watermarked +3313,0.5245901639344263,0.29166666666666663,-7.216870606867377,0.005485187056437409,Watermarked +3314,0.5737704918032787,0.3375,-6.518811541138283,0.007334069087384167,Watermarked +3315,0.5409836065573771,0.22499999999999998,-7.68678962084268,0.004574973719624272,Watermarked +3316,0.6885245901639344,0.39166666666666666,-9.409277800552035,0.0025434230249447316,Watermarked +3317,0.5245901639344263,0.3541666666666667,-21.360184505383586,0.00022451203065569885,Watermarked +3318,0.47540983606557374,0.2125,-6.081034149138327,0.008928849971299894,Watermarked +3319,0.5333333333333333,0.3603142076502732,-2.9539865796064393,0.05983245019134663,Watermarked +3320,0.6885245901639344,0.33333333333333337,-8.952610373293831,0.0029407013393304916,Watermarked +3321,0.5,0.39849726775956285,-6.830325200725934,0.00642110575024065,Watermarked +3322,0.5245901639344263,0.4,-3.737704918032786,0.03339722912081489,Watermarked +3323,0.47540983606557374,0.2791666666666667,-11.423030339825978,0.00143970284382488,Watermarked +3324,0.65,0.39419398907103825,-4.965110212591529,0.01569056902102536,Watermarked +3325,0.55,0.3604508196721311,-3.3681723780241897,0.043469547365275946,Watermarked +3326,0.5573770491803278,0.375,-3.3374687113774946,0.04447114763467144,Watermarked +3327,0.65,0.34002732240437156,-9.8986749455488,0.0021928507964472117,Watermarked +3328,0.5081967213114754,0.4041666666666667,-3.473480352013456,0.040244742555193926,Watermarked +3329,0.5166666666666667,0.33189890710382514,-10.490828508285672,0.001849332165580751,Watermarked +3330,0.6666666666666666,0.3065573770491803,-8.898873673993194,0.002992723821402328,Watermarked +3331,0.6885245901639344,0.2708333333333333,-11.732895329579321,0.0013304938857159276,Watermarked +3332,0.7049180327868853,0.4,-10.562670498616757,0.0018126371846850279,Watermarked +3333,0.48333333333333334,0.4628415300546448,-5.357142857142865,0.11748359353956558,Watermarked +3334,0.5666666666666667,0.37336065573770494,-4.6106699178095605,0.019191997918002076,Watermarked +3335,0.5333333333333333,0.3980191256830601,-2.348482665780014,0.10044412446307645,Watermarked +3336,0.6557377049180327,0.37083333333333335,-5.4804035870677845,0.011947625433148852,Watermarked +3337,0.47540983606557374,0.3333333333333333,-3.1843068128816943,0.04992864292949258,Watermarked 
+3338,0.6557377049180327,0.4083333333333333,-4.59932645244697,0.01932008137256461,Watermarked +3339,0.639344262295082,0.35833333333333334,-10.845912217186935,0.0016770220983140147,Watermarked +3340,0.4918032786885246,0.2833333333333333,-5.896420481369791,0.009738001288109581,Watermarked +3341,0.6166666666666667,0.3812158469945356,-3.7975738512734774,0.032057418473780255,Watermarked +3342,0.6721311475409836,0.4291666666666667,-30.452174863987732,7.779154477118327e-05,Watermarked +3343,0.5573770491803278,0.22499999999999998,-23.027757458005887,0.00017938051259614428,Watermarked +3344,0.4426229508196721,0.29583333333333334,-3.202682563338301,0.04923073087668093,Watermarked +3345,0.6666666666666666,0.4817622950819672,-4.70814887470562,0.018135489628524907,Watermarked +3346,0.6721311475409836,0.3583333333333333,-9.722669668020147,0.0023110927865077316,Watermarked +3347,0.5573770491803278,0.32083333333333336,-4.437542626227876,0.021273677300139238,Watermarked +3348,0.6833333333333333,0.4314890710382514,-9.676632078012846,0.0023434092465502462,Watermarked +3349,0.5081967213114754,0.3208333333333333,-3.604111013983922,0.03665562896226882,Watermarked +3350,0.7704918032786885,0.3208333333333333,-21.583606557377053,0.00021764701504013405,Watermarked +3351,0.5901639344262295,0.41666666666666663,-3.2125451549484705,0.04886123097830394,Watermarked +3352,0.5573770491803278,0.3208333333333333,-8.591077485213033,0.0033154323787321117,Watermarked +3353,0.5333333333333333,0.38545081967213113,-2.9267563949002504,0.06116082615925972,Watermarked +3354,0.4666666666666667,0.3236338797814207,-13.636558407121642,0.000853123683257044,Watermarked +3355,0.6166666666666667,0.36516393442622946,-8.46907603191196,0.0034560758139621692,Watermarked +3356,0.5,0.36065573770491804,-3.1934829897182784,0.04957857968168316,Watermarked +3357,0.6,0.3235655737704918,-28.52854660180471,9.456145255444976e-05,Watermarked +3358,0.5409836065573771,0.4291666666666667,-2.0643127364438847,0.13095346493752535,Watermarked +3359,0.45,0.38148907103825136,-1.9632892320372335,0.1443881711602152,Watermarked +3360,0.65,0.3028005464480874,-21.112622602464683,0.00023245973293610858,Watermarked +3361,0.5081967213114754,0.37083333333333335,-3.54807654763531,0.038143015996558144,Watermarked +3362,0.5333333333333333,0.38155737704918036,-4.0869440858831725,0.026471716786619498,Watermarked +3363,0.6166666666666667,0.3941256830601093,-5.534321299197799,0.011626515414883042,Watermarked +3364,0.5081967213114754,0.29583333333333334,-9.689747679740668,0.002334141978546938,Watermarked +3365,0.45901639344262296,0.32083333333333336,-2.9505777976622465,0.059996749206154006,Watermarked +3366,0.5737704918032787,0.3375,-3.780327868852458,0.03243619070712851,Watermarked +3367,0.5573770491803278,0.375,-3.8488075064770464,0.03096513128016824,Watermarked +3368,0.45901639344262296,0.3416666666666667,-4.878136536726772,0.01646720054964036,Watermarked +3369,0.4666666666666667,0.3066256830601093,-3.3297523730648932,0.044727459212230945,Watermarked +3370,0.43333333333333335,0.35252732240437157,-2.5701626159312543,0.08248147008467596,Watermarked +3371,0.639344262295082,0.36666666666666664,-5.403746742275941,0.012424423736674949,Watermarked +3372,0.6166666666666667,0.41857923497267757,-2.837173206379461,0.06579953452896789,Watermarked +3373,0.43333333333333335,0.2981785063752277,-9.104294478527612,0.011850419592490571,Watermarked +3374,0.55,0.3736338797814208,-3.6168425013226613,0.03632805703920555,Watermarked 
+3375,0.7377049180327869,0.3458333333333333,-9.297077569799361,0.0026341027851884773,Watermarked +3376,0.5901639344262295,0.4041666666666667,-3.0118665582406012,0.05712690752613409,Watermarked +3377,0.45,0.3567622950819672,-2.922322679137377,0.061380599938098745,Watermarked +3378,0.5666666666666667,0.3567622950819672,-8.560154259731128,0.003350353702966942,Watermarked +3379,0.4426229508196721,0.3958333333333333,-2.1349234997177082,0.12243934625158044,Watermarked +3380,0.5833333333333334,0.3274590163934426,-7.792967812252545,0.004397428835019784,Watermarked +3381,0.4262295081967213,0.35833333333333334,-2.3199911010934016,0.10308502717775488,Watermarked +3382,0.6166666666666667,0.38155737704918036,-4.19677462214937,0.02468223424934266,Watermarked +3383,0.5901639344262295,0.35833333333333334,-9.637017116211096,0.002371698207192692,Watermarked +3384,0.5,0.28724954462659374,-20.478625837697965,0.0023760107227863043,Watermarked +3385,0.6333333333333333,0.39419398907103825,-23.001886718608972,0.00017998371795611563,Watermarked +3386,0.639344262295082,0.32916666666666666,-6.6941484555943065,0.006800654936853843,Watermarked +3387,0.7049180327868853,0.37916666666666665,-10.355237394824906,0.001921317535318784,Watermarked +3388,0.55,0.4269808743169399,-3.4667216700978054,0.04044237885506118,Watermarked +3389,0.5,0.3236338797814208,-3.3358653417919237,0.04452425266445473,Watermarked +3390,0.45,0.28565573770491803,-2.3900038580000214,0.0967410413896404,Watermarked +3391,0.7,0.3323087431693989,-10.546963974844278,0.001820576611444715,Watermarked +3392,0.5737704918032787,0.39166666666666666,-12.616523095570006,0.0010737810472414555,Watermarked +3393,0.4166666666666667,0.331511839708561,-2.9439949370436143,0.09860789571567317,Watermarked +3394,0.819672131147541,0.325,-13.385460331466769,0.0009013902444265222,Watermarked +3395,0.48333333333333334,0.19508196721311474,-6.738703907451196,0.00667329610025673,Watermarked +3396,0.7213114754098361,0.31666666666666665,-9.776873289560362,0.0022737966110181968,Watermarked +3397,0.5333333333333333,0.2782103825136612,-6.842713959859326,0.006387974012842435,Watermarked +3398,0.6833333333333333,0.31058743169398906,-7.0022122373829845,0.005980846181537664,Watermarked +3399,0.65,0.2774590163934426,-7.817198672088378,0.004358191490211607,Watermarked +3400,0.6065573770491803,0.37916666666666665,-3.7510920776385794,0.033091499306512666,Watermarked +3401,0.5245901639344263,0.41666666666666663,-5.0158308909899345,0.015259597669989054,Watermarked +3402,0.5,0.3396174863387978,-3.020014584938689,0.05675846509350086,Watermarked +3403,0.6065573770491803,0.35416666666666663,-6.420806830530328,0.0076563407117634,Watermarked +3404,0.55,0.42704918032786887,-3.2041931426214503,0.04917390886551068,Watermarked +3405,0.45,0.38995901639344266,-3.129964263210284,0.0520669925513877,Watermarked +3406,0.7,0.44002732240437165,-7.029706101430858,0.005914150507692506,Watermarked +3407,0.4166666666666667,0.3360655737704918,-6.662521026761018,0.006893006911496739,Watermarked +3408,0.6,0.41120218579234974,-4.744558099428093,0.01776029146137704,Watermarked +3409,0.7377049180327869,0.3916666666666667,-11.23243861284888,0.0015128416306198192,Watermarked +3410,0.65,0.4360655737704918,-4.896859345148878,0.01629585933682827,Watermarked +3411,0.5,0.41065573770491803,-3.340359673429475,0.04437559930810233,Watermarked +3412,0.5737704918032787,0.4083333333333333,-3.970491803278687,0.02855603110389197,Watermarked +3413,0.639344262295082,0.375,-4.954036545154963,0.015786763366299392,Watermarked 
+3414,0.6,0.439275956284153,-2.5509371831068104,0.0838742637314232,Watermarked +3415,0.6721311475409836,0.37083333333333335,-12.587812138042343,0.001081035521070716,Watermarked +3416,0.6721311475409836,0.42083333333333334,-13.599864280438059,0.000859959220110807,Watermarked +3417,0.6166666666666667,0.49330601092896176,-2.4421197772396783,0.09232550216594695,Watermarked +3418,0.43333333333333335,0.3809653916211293,-1.5855147752916146,0.25372930058134713,Watermarked +3419,0.7213114754098361,0.32083333333333336,-18.273075151598423,0.0003575799805904938,Watermarked +3420,0.6,0.4062841530054645,-6.551255287390875,0.007231321491454494,Watermarked +3421,0.5409836065573771,0.425,-3.0865517657726187,0.05385879831750464,Watermarked +3422,0.819672131147541,0.3875,-15.69614416660204,0.000562059046312007,Watermarked +3423,0.65,0.4394808743169399,-6.100988988496524,0.008846749505079436,Watermarked +3424,0.6833333333333333,0.4354508196721312,-8.022565688606122,0.004043498406787921,Watermarked +3425,0.639344262295082,0.45833333333333337,-7.5244830164877445,0.004865119180322468,Watermarked +3426,0.5333333333333333,0.3650273224043715,-11.968876772692552,0.0012545933074113616,Watermarked +3427,0.6885245901639344,0.32916666666666666,-6.2514540023238165,0.008258502009868794,Watermarked +3428,0.5166666666666667,0.38176229508196724,-9.8479960675198,0.0022260589594432483,Watermarked +3429,0.5333333333333333,0.40710382513661203,-3.2566855663164747,0.047249843238845575,Watermarked +3430,0.5333333333333333,0.44781420765027324,-2.777298311206574,0.0691462057873957,Watermarked +3431,0.639344262295082,0.38749999999999996,-4.973957462489661,0.015614263014698614,Watermarked +3432,0.5,0.4034608378870674,-5.908250072050177,0.027472183908806763,Watermarked +3433,0.6333333333333333,0.41933060109289616,-6.341832402564622,0.007929704462389898,Watermarked +3434,0.5333333333333333,0.4229508196721311,-4.809523809523813,0.017115512915415635,Watermarked +3435,0.43333333333333335,0.3479052823315118,-5.862500000000004,0.02788479615419901,Watermarked +3436,0.5901639344262295,0.44166666666666665,-6.172915501073344,0.008558915834601189,Watermarked +3437,0.6065573770491803,0.425,-2.836410863828114,0.06584086048198345,Watermarked +3438,0.5666666666666667,0.40607923497267756,-3.337367049030468,0.044474512395329985,Watermarked +3439,0.7377049180327869,0.3125,-11.156636603647863,0.0015433006224139084,Watermarked +3440,0.5409836065573771,0.39999999999999997,-2.3612943568800553,0.09928340428129473,Watermarked +3441,0.48333333333333334,0.3940573770491803,-7.029354227468431,0.005914997890496912,Watermarked +3442,0.5833333333333334,0.4853142076502732,-3.218376091512211,0.04864442449363043,Watermarked +3443,0.4,0.3277322404371585,-7.5308054776588165,0.004853369315234331,Watermarked +3444,0.55,0.4401639344262295,-2.2348801205435334,0.11149003001722149,Watermarked +3445,0.7704918032786885,0.38333333333333336,-9.35437505152399,0.0025872696994375557,Watermarked +3446,0.4666666666666667,0.35273224043715845,-2.771421867769551,0.06948596977369918,Watermarked +3447,0.6065573770491803,0.3916666666666667,-10.246669168835966,0.0019816576756549577,Watermarked +3448,0.6,0.3605191256830601,-5.027382355038384,0.015163608282268311,Watermarked +3449,0.5166666666666667,0.4354508196721312,-2.2311148149182007,0.11188060441585682,Watermarked +3450,0.55,0.385655737704918,-7.0347669547746365,0.005901980635791814,Watermarked +3451,0.4918032786885246,0.38749999999999996,-2.929866094565433,0.06100726967849578,Watermarked 
+3452,0.7213114754098361,0.35,-20.62605763263766,0.0002492064346505477,Watermarked +3453,0.5409836065573771,0.375,-3.46728445878972,0.040425874782353266,Watermarked +3454,0.7049180327868853,0.3833333333333333,-5.862266729790863,0.00989811735639771,Watermarked +3455,0.5409836065573771,0.3875,-4.478018111973132,0.02076175387541808,Watermarked +3456,0.48333333333333334,0.40225409836065573,-3.1045263606689577,0.05310763670073003,Watermarked +3457,0.55,0.39371584699453555,-2.5092989392309506,0.08699122587945884,Watermarked +3458,0.6885245901639344,0.3458333333333333,-7.3958473467775,0.005112549560971972,Watermarked +3459,0.45901639344262296,0.39999999999999997,-2.7428341074780405,0.07116877964790087,Watermarked +3460,0.6065573770491803,0.4083333333333333,-3.7454630727011047,0.03321961329360707,Watermarked +3461,0.55,0.3815573770491803,-5.86945467255433,0.009864134442699606,Watermarked +3462,0.5409836065573771,0.35,-2.4617916243617888,0.09072301062363927,Watermarked +3463,0.5245901639344263,0.3666666666666667,-4.8396027027399,0.016827287331982445,Watermarked +3464,0.5166666666666667,0.38155737704918036,-4.355025558660401,0.02236903359688658,Watermarked +3465,0.47540983606557374,0.39999999999999997,-3.694312398951678,0.03441340625590583,Watermarked +3466,0.6229508196721312,0.375,-8.04851009961339,0.004005903148525875,Watermarked +3467,0.5245901639344263,0.4166666666666667,-3.9653624934399807,0.028652615314166956,Watermarked +3468,0.5409836065573771,0.3541666666666667,-5.938681961594909,0.009544543156865669,Watermarked +3469,0.6557377049180327,0.4,-3.9840670207259876,0.028302424799336666,Watermarked +3470,0.6333333333333333,0.3528688524590164,-12.365301190960757,0.001139527243227325,Watermarked +3471,0.5081967213114754,0.37083333333333335,-7.995723638492874,0.004082886586878434,Watermarked +3472,0.6229508196721312,0.5458333333333334,-3.518727928731812,0.03895281371803606,Watermarked +3473,0.6166666666666667,0.3894125683060109,-4.405715742078374,0.021687749143792793,Watermarked +3474,0.5409836065573771,0.36250000000000004,-3.7715052757850085,0.032632179601749925,Watermarked +3475,0.5,0.360724043715847,-4.619815242020446,0.01908953586981084,Watermarked +3476,0.55,0.41475409836065574,-5.369818568286147,0.012643397039975352,Watermarked +3477,0.6833333333333333,0.4059426229508197,-4.851990369531891,0.016710420799139213,Watermarked +3478,0.5666666666666667,0.4314207650273224,-5.6998934006029405,0.01070840976220116,Watermarked +3479,0.55,0.45642076502732243,-8.972601513715789,0.0029216534818719115,Watermarked +3480,0.7704918032786885,0.3208333333333333,-14.29409263370358,0.0007419958228994995,Watermarked +3481,0.7213114754098361,0.4166666666666667,-9.335911788157421,0.002602239684135932,Watermarked +3482,0.55,0.3610655737704918,-3.939648677119553,0.029143186739241125,Watermarked +3483,0.5333333333333333,0.4394808743169399,-2.7199087833206432,0.07255497251636857,Watermarked +3484,0.6,0.32766393442622954,-7.616943405099432,0.004696954329420175,Watermarked +3485,0.6,0.41454918032786886,-4.203244682830465,0.024581754588030094,Watermarked +3486,0.6065573770491803,0.36666666666666664,-8.088416484282563,0.003948972398783288,Watermarked +3487,0.5666666666666667,0.35601092896174863,-2.719036894075117,0.07260835021632396,Watermarked +3488,0.65,0.3983606557377049,-10.650424052370854,0.001769113584875861,Watermarked +3489,0.55,0.43558743169398906,-2.9888817184112546,0.058182556819309464,Watermarked +3490,0.6,0.3937841530054645,-4.000584919681885,0.027997738989114628,Watermarked 
+3491,0.639344262295082,0.3958333333333333,-5.43407451169032,0.012232865280609554,Watermarked +3492,0.7213114754098361,0.5166666666666667,-5.7882292798767665,0.010257246511227655,Watermarked +3493,0.55,0.3567622950819672,-20.143694223280193,0.00026743218087057065,Watermarked +3494,0.48333333333333334,0.3688524590163934,-2.2635535190344163,0.10856902810055359,Watermarked +3495,0.6229508196721312,0.3791666666666667,-5.261267526539542,0.013378770206243135,Watermarked +3496,0.5833333333333334,0.4192622950819672,-4.998024144164243,0.01540912351567112,Watermarked +3497,0.6,0.4185792349726776,-4.200526985325906,0.024623895827933763,Watermarked +3498,0.6,0.3777322404371585,-3.6747652949491756,0.034884091484984066,Watermarked +3499,0.6885245901639344,0.3916666666666667,-27.593419053471173,0.00010447312619049549,Watermarked +3500,0.6229508196721312,0.41666666666666663,-5.199400639874496,0.013822982018819301,Watermarked +3501,0.6333333333333333,0.46427595628415297,-4.318179445331662,0.022881712741708818,Watermarked +3502,0.48333333333333334,0.3237704918032787,-4.892471154105429,0.016335808762118268,Watermarked +3503,0.5245901639344263,0.39166666666666666,-3.8686420190221638,0.030555105638903693,Watermarked +3504,0.5666666666666667,0.37766393442622953,-3.0183385183635814,0.05683401011881442,Watermarked +3505,0.5666666666666667,0.4062158469945355,-3.358719055760332,0.043774850277544984,Watermarked +3506,0.5573770491803278,0.43333333333333335,-6.89052992289735,0.006262188755552419,Watermarked +3507,0.6065573770491803,0.42083333333333334,-8.914754098360653,0.00297722406530442,Watermarked +3508,0.5245901639344263,0.3416666666666666,-7.603986987982712,0.004720052083609356,Watermarked +3509,0.45,0.3691939890710383,-1.986395773541501,0.14117716016377596,Watermarked +3510,0.55,0.360724043715847,-4.946082903302941,0.01585632915848264,Watermarked +3511,0.5737704918032787,0.36666666666666664,-4.588689352245944,0.019441198543417786,Watermarked +3512,0.6885245901639344,0.3375,-12.748930752673918,0.0010411491892625435,Watermarked +3513,0.7377049180327869,0.32499999999999996,-12.2541580554229,0.0011703222583523416,Watermarked +3514,0.6666666666666666,0.3778005464480874,-4.797285861708767,0.017234618038032837,Watermarked +3515,0.7666666666666667,0.34405737704918027,-14.172745497720056,0.0007609909456212724,Watermarked +3516,0.6557377049180327,0.37083333333333335,-5.3888664006939155,0.012519846650421048,Watermarked +3517,0.6721311475409836,0.4041666666666667,-8.518263011092074,0.003398441269028304,Watermarked +3518,0.6721311475409836,0.3458333333333333,-9.713343067748339,0.002317591752507357,Watermarked +3519,0.6229508196721312,0.30833333333333335,-6.506748841466283,0.007372760445320987,Watermarked +3520,0.5245901639344263,0.42083333333333334,-2.8626986790948408,0.06443431083077424,Watermarked +3521,0.6166666666666667,0.4065573770491804,-6.594241817524086,0.007098081515958769,Watermarked +3522,0.5666666666666667,0.34419398907103826,-9.378588970336171,0.0025678094646998427,Watermarked +3523,0.5833333333333334,0.3485655737704918,-3.9566860100053627,0.028816950912038987,Watermarked +3524,0.6166666666666667,0.3726775956284153,-3.463270042711585,0.040543787476660896,Watermarked +3525,0.5166666666666667,0.43586065573770494,-4.117687550148067,0.02595439887765405,Watermarked +3526,0.6065573770491803,0.42083333333333334,-2.7244712370027178,0.07227644833071428,Watermarked +3527,0.65,0.37315573770491806,-7.357526587712183,0.005189458410858861,Watermarked 
+3528,0.819672131147541,0.32083333333333336,-12.69043363555155,0.0010554010131434192,Watermarked +3529,0.5166666666666667,0.3372495446265938,-4.51546348372934,0.04570861042365545,Watermarked +3530,0.5573770491803278,0.3416666666666667,-3.2106696051862316,0.04893122743058373,Watermarked +3531,0.5245901639344263,0.3875,-2.707548854683142,0.07331622631866143,Watermarked +3532,0.5333333333333333,0.39398907103825137,-3.677608363112817,0.03481512208257183,Watermarked +3533,0.5666666666666667,0.4894808743169399,-2.350517220396054,0.1002587000234438,Watermarked +3534,0.5333333333333333,0.4025273224043716,-3.2715037607881903,0.04672400543382343,Watermarked +3535,0.5833333333333334,0.36441256830601093,-3.6162952233041508,0.036342061007907864,Watermarked +3536,0.6666666666666666,0.4438524590163935,-17.78543820658479,0.0003875755523465665,Watermarked +3537,0.639344262295082,0.3333333333333333,-5.158898896581134,0.014124257169801814,Watermarked +3538,0.6166666666666667,0.49760928961748635,-3.887955865194524,0.030162518753766708,Watermarked +3539,0.6333333333333333,0.4562841530054645,-9.590620075287022,0.0024054083955540014,Watermarked +3540,0.6229508196721312,0.4083333333333333,-3.897367233401582,0.029973563805789114,Watermarked +3541,0.6229508196721312,0.4,-6.306001458450521,0.008057963024016634,Watermarked +3542,0.5166666666666667,0.40628415300546444,-2.0963572160561217,0.1270050338262828,Watermarked +3543,0.5245901639344263,0.40416666666666673,-2.1561984129969214,0.12000461051054315,Watermarked +3544,0.5333333333333333,0.4229508196721311,-4.136976592067392,0.025636466240534804,Watermarked +3545,0.5833333333333334,0.42691256830601093,-3.084110518603815,0.053961847840379426,Watermarked +3546,0.5573770491803278,0.35833333333333334,-4.84204603440025,0.016804152704825513,Watermarked +3547,0.6,0.4023907103825136,-6.903695753068684,0.006228128127498301,Watermarked +3548,0.6885245901639344,0.37083333333333335,-22.322514291558807,0.00019683968706694644,Watermarked +3549,0.48333333333333334,0.32586520947176684,-3.2146877218511154,0.08465882990859916,Watermarked +3550,0.65,0.38995901639344266,-19.23389711083783,0.0003069437053761336,Watermarked +3551,0.6166666666666667,0.44815573770491807,-12.270797400644883,0.0011656421347428028,Watermarked +3552,0.6065573770491803,0.37083333333333335,-3.3590074850087963,0.043765495024786334,Watermarked +3553,0.6229508196721312,0.3208333333333333,-8.192450020709705,0.003805499272772318,Watermarked +3554,0.5833333333333334,0.5062841530054645,-5.14902114352389,0.01419903025134191,Watermarked +3555,0.6166666666666667,0.38545081967213113,-6.522157996146436,0.007323382596367344,Watermarked +3556,0.5245901639344263,0.3625,-5.036188941644216,0.015090956088189262,Watermarked +3557,0.5573770491803278,0.38333333333333336,-4.5941415869804265,0.019378995099920374,Watermarked +3558,0.5166666666666667,0.46045081967213114,-2.451392373266136,0.09156587040440396,Watermarked +3559,0.5409836065573771,0.4041666666666667,-3.3807925592909616,0.04306616942393178,Watermarked +3560,0.5409836065573771,0.49583333333333335,-2.6281319368691185,0.07845092410121779,Watermarked +3561,0.7166666666666667,0.38551912568306007,-7.359999679334292,0.0051844490931802715,Watermarked +3562,0.7166666666666667,0.31905737704918036,-9.174911614798257,0.002737770634718071,Watermarked +3563,0.6065573770491803,0.4083333333333333,-3.0623788546160213,0.054890208233557496,Watermarked +3564,0.5333333333333333,0.43041894353369764,-1.6922369290882864,0.23267507692719033,Watermarked 
+3565,0.6666666666666666,0.36489071038251364,-7.458228376445201,0.004990536830131611,Watermarked +3566,0.5166666666666667,0.39842896174863385,-14.529811045035604,0.000706861443434645,Watermarked +3567,0.5666666666666667,0.3861338797814208,-3.095250278326531,0.05349362654863495,Watermarked +3568,0.6885245901639344,0.3375,-8.553875315773226,0.003357503807386855,Watermarked +3569,0.5333333333333333,0.4355191256830601,-7.054729326958077,0.0058542974670971835,Watermarked +3570,0.6557377049180327,0.39583333333333326,-4.287445285495584,0.023321028120091257,Watermarked +3571,0.6333333333333333,0.38989071038251366,-11.606555225890991,0.0013736462375547754,Watermarked +3572,0.6166666666666667,0.40211748633879785,-3.9527199474578647,0.028892473194063042,Watermarked +3573,0.5081967213114754,0.3875,-2.3837761964250683,0.09728573796120057,Watermarked +3574,0.6557377049180327,0.3958333333333333,-5.180131981040665,0.013965256620241263,Watermarked +3575,0.639344262295082,0.48750000000000004,-5.206088992974237,0.01377403946898648,Watermarked +3576,0.45,0.3984289617486339,-3.252508058107846,0.047399438327312575,Watermarked +3577,0.6229508196721312,0.45833333333333326,-3.755595353518015,0.032989461923480595,Watermarked +3578,0.6721311475409836,0.43333333333333335,-5.352084912420385,0.012759859239265056,Watermarked +3579,0.55,0.3859289617486339,-6.327570523296936,0.007980431881706069,Watermarked +3580,0.6885245901639344,0.3583333333333333,-8.261956373376224,0.0037134622752403278,Watermarked +3581,0.6065573770491803,0.39583333333333337,-14.806483893669949,0.0006683885463905405,Watermarked +3582,0.5409836065573771,0.375,-6.0055128469177514,0.009248776088665693,Watermarked +3583,0.7333333333333333,0.3855874316939891,-13.52760363997253,0.0008736345306870901,Watermarked +3584,0.5901639344262295,0.375,-5.923439030264504,0.009613733901387398,Watermarked +3585,0.7704918032786885,0.32499999999999996,-15.651197399435246,0.0005668681892135681,Watermarked +3586,0.7704918032786885,0.3041666666666667,-10.064072122074942,0.0020889346595399047,Watermarked +3587,0.5409836065573771,0.3625,-4.290763799723821,0.02327307093304941,Watermarked +3588,0.6721311475409836,0.37916666666666665,-7.686877814143884,0.004574822356362937,Watermarked +3589,0.5166666666666667,0.24057377049180326,-10.793811013450267,0.0017009322854444507,Watermarked +3590,0.55,0.40635245901639344,-3.8261690084495177,0.03144179376080752,Watermarked +3591,0.55,0.4396857923497268,-6.979319263082819,0.006037138823969032,Watermarked +3592,0.639344262295082,0.35833333333333334,-5.646420206490955,0.010994199496502145,Watermarked +3593,0.5,0.35724043715847,-2.2370139019506667,0.11126942224811241,Watermarked +3594,0.5666666666666667,0.4478825136612022,-5.2484139227280515,0.01346950981253263,Watermarked +3595,0.5245901639344263,0.4541666666666667,-3.811215023955327,0.03176182755813695,Watermarked +3596,0.6166666666666667,0.3770491803278689,-5.292311191396577,0.013162886761926772,Watermarked +3597,0.8333333333333334,0.4108606557377049,-28.554907520499913,9.430057361661532e-05,Watermarked +3598,0.5666666666666667,0.4064207650273224,-5.716648520223962,0.01062086627428458,Watermarked +3599,0.5666666666666667,0.4354508196721312,-6.567850831861847,0.007179495661119332,Watermarked +3600,0.6557377049180327,0.4625,-4.525936567821964,0.02017613595003384,Watermarked +3601,0.5666666666666667,0.2616120218579235,-5.789061661601357,0.010253114813387197,Watermarked +3602,0.6065573770491803,0.4208333333333333,-4.46482463507427,0.020926850914564546,Watermarked 
+3603,0.6166666666666667,0.373292349726776,-5.837995555994575,0.010014006051520334,Watermarked +3604,0.6229508196721312,0.32083333333333336,-5.409452966923079,0.012388083734135366,Watermarked +3605,0.7213114754098361,0.37083333333333335,-13.136517577876024,0.0009528915088720033,Watermarked +3606,0.45,0.38565573770491807,-1.9388846252338177,0.1478731519159202,Watermarked +3607,0.65,0.33975409836065573,-7.109185568026944,0.005726775618868671,Watermarked +3608,0.6166666666666667,0.4028005464480875,-4.245647176969608,0.02393615072729287,Watermarked +3609,0.5333333333333333,0.31509562841530053,-8.1350398352272,0.003883805394600376,Watermarked +3610,0.6065573770491803,0.275,-8.547594094887065,0.0033646767112774094,Watermarked +3611,0.5166666666666667,0.37739071038251365,-4.570466183924919,0.019650992181636566,Watermarked +3612,0.7049180327868853,0.38749999999999996,-10.334980525866824,0.001932389182639011,Watermarked +3613,0.65,0.46461748633879785,-6.40281942286815,0.007717499991952441,Watermarked +3614,0.5245901639344263,0.39166666666666666,-4.3147109239249755,0.022930753286261416,Watermarked +3615,0.5166666666666667,0.3690573770491803,-3.785362525900684,0.0323250246243217,Watermarked +3616,0.5666666666666667,0.4151639344262295,-3.7566753428966173,0.03296505095792107,Watermarked +3617,0.5409836065573771,0.35416666666666663,-17.816045884784717,0.0003855963684750506,Watermarked +3618,0.5333333333333333,0.35239071038251363,-4.290395689342995,0.023278384354672565,Watermarked +3619,0.8032786885245902,0.4291666666666667,-17.0700379678159,0.00043795305550041073,Watermarked +3620,0.7213114754098361,0.4,-27.264182776241938,0.00010829140307033679,Watermarked +3621,0.5245901639344263,0.4166666666666667,-3.172289994751985,0.05039180926567533,Watermarked +3622,0.45,0.3867030965391621,-18.78378378378386,0.0028222292471545055,Watermarked +3623,0.5573770491803278,0.35833333333333334,-6.460980283821876,0.007522028995284054,Watermarked +3624,0.5573770491803278,0.4041666666666667,-8.291518233493496,0.0036752095251250446,Watermarked +3625,0.55,0.38524590163934425,-2.953558943086598,0.059853030783475765,Watermarked +3626,0.6166666666666667,0.4278005464480874,-3.220215216567121,0.048576294366246366,Watermarked +3627,0.5245901639344263,0.35416666666666663,-7.776108221599537,0.00442500536007881,Watermarked +3628,0.6229508196721312,0.3375,-6.2978017747795265,0.00808769678640319,Watermarked +3629,0.5,0.39392076502732243,-2.8228292898095364,0.06658256000513207,Watermarked +3630,0.6333333333333333,0.3321038251366121,-9.748782925528516,0.002293024514092687,Watermarked +3631,0.5,0.3729508196721311,-3.16062146897757,0.05084675449389502,Watermarked +3632,0.6666666666666666,0.31974043715846995,-16.761645970184002,0.0004623636950420215,Watermarked +3633,0.5666666666666667,0.4396174863387978,-7.124258314479804,0.0056921272252719556,Watermarked +3634,0.6333333333333333,0.46072404371584696,-5.791146439440572,0.010242776156530948,Watermarked +3635,0.5409836065573771,0.4125,-7.478844437595781,0.00495105882203386,Watermarked +3636,0.6666666666666666,0.4194672131147541,-3.705356086132572,0.0341510693846338,Watermarked +3637,0.6065573770491803,0.2583333333333333,-11.303389666068615,0.0014850505321337656,Watermarked +3638,0.4262295081967213,0.37499999999999994,-2.582483761527067,0.0816038163981699,Watermarked +3639,0.6333333333333333,0.3319672131147541,-16.69265269416492,0.00046807129563190304,Watermarked +3640,0.6,0.42718579234972676,-4.989229131610327,0.01548368101755126,Watermarked 
+3641,0.7049180327868853,0.37916666666666665,-11.48552138205233,0.0014167460006659608,Watermarked +3642,0.5,0.3734289617486339,-7.373424834781318,0.0051573670428228195,Watermarked +3643,0.8,0.3737704918032787,-11.276355501471617,0.0014955591121438806,Watermarked +3644,0.6885245901639344,0.35833333333333334,-6.697501094028932,0.006790960911751886,Watermarked +3645,0.7377049180327869,0.37916666666666665,-18.208323627746466,0.0003613808590242596,Watermarked +3646,0.7213114754098361,0.47500000000000003,-5.911475409836067,0.009668500550400335,Watermarked +3647,0.5573770491803278,0.35,-3.567189130512097,0.03762717652367037,Watermarked +3648,0.5,0.4062841530054645,-1.9500758855993163,0.1462629282557643,Watermarked +3649,0.7049180327868853,0.3291666666666666,-35.834028669908896,4.779336400647669e-05,Watermarked +3650,0.55,0.36475409836065575,-5.115536577871306,0.014456377843406167,Watermarked +3651,0.5166666666666667,0.3778005464480874,-2.8771732821175253,0.06367584045734935,Watermarked +3652,0.5166666666666667,0.39064207650273225,-2.5781209852352887,0.08191326217312117,Watermarked +3653,0.6333333333333333,0.398155737704918,-6.811947916107325,0.006470670821303707,Watermarked +3654,0.5573770491803278,0.35416666666666663,-3.722323789331516,0.03375297717263195,Watermarked +3655,0.6333333333333333,0.40225409836065573,-7.458643842395928,0.004989737155548977,Watermarked +3656,0.5409836065573771,0.39583333333333337,-4.726024019252636,0.017950011968011444,Watermarked +3657,0.5573770491803278,0.2833333333333333,-23.253380378036304,0.00017423225248505516,Watermarked +3658,0.6229508196721312,0.4375,-3.696204554145968,0.034368276506610014,Watermarked +3659,0.48333333333333334,0.41976320582877963,-5.126120909780012,0.03601274479900024,Watermarked +3660,0.4918032786885246,0.31666666666666665,-3.5694569453443568,0.03756656146901733,Watermarked +3661,0.7049180327868853,0.31666666666666665,-19.02035477866064,0.000317329217166385,Watermarked +3662,0.6833333333333333,0.2987704918032787,-9.287563234189735,0.002641987913158529,Watermarked +3663,0.5901639344262295,0.35,-20.378585595507392,0.00025834295408300217,Watermarked +3664,0.5666666666666667,0.448292349726776,-3.5599428053606994,0.03782169758880093,Watermarked +3665,0.6166666666666667,0.4021857923497268,-8.675666340422445,0.0032223344164688237,Watermarked +3666,0.6885245901639344,0.3958333333333333,-12.228241897871557,0.0011776614876831963,Watermarked +3667,0.6333333333333333,0.4060109289617486,-4.451765940749828,0.0210919401610888,Watermarked +3668,0.6,0.3400956284153005,-16.126211408017976,0.0005186735594686228,Watermarked +3669,0.6721311475409836,0.3583333333333334,-21.740550300468346,0.00021299095231525587,Watermarked +3670,0.6,0.4187158469945355,-5.0760762819997485,0.014767517268100116,Watermarked +3671,0.5833333333333334,0.4314890710382514,-4.691101983797846,0.018314699259855377,Watermarked +3672,0.6166666666666667,0.4105191256830601,-6.871714846368723,0.006311291308420261,Watermarked +3673,0.5666666666666667,0.4023907103825136,-3.1785087830863152,0.050151443196549406,Watermarked +3674,0.5666666666666667,0.38989071038251366,-8.909952627909695,0.002981899212734121,Watermarked +3675,0.6666666666666666,0.3488387978142076,-10.158428205179094,0.0020325642199010164,Watermarked +3676,0.5,0.39016393442622954,-4.155570602285484,0.025334728700013992,Watermarked +3677,0.6,0.426844262295082,-3.4665463985771736,0.04044752053565438,Watermarked +3678,0.65,0.42295081967213116,-5.864413389759132,0.009887952398219825,Watermarked 
+3679,0.6,0.4019808743169399,-3.3292886989365154,0.044742920567822704,Watermarked +3680,0.639344262295082,0.39999999999999997,-9.082476164942625,0.0028198267013386054,Watermarked +3681,0.5081967213114754,0.3541666666666667,-5.142940376750061,0.014245317657868042,Watermarked +3682,0.7704918032786885,0.3791666666666667,-10.107871974839727,0.0020625133025401216,Watermarked +3683,0.7704918032786885,0.41666666666666663,-15.679030900154354,0.0005638836832955063,Watermarked +3684,0.6721311475409836,0.41666666666666663,-12.515152510941379,0.0010996877982163017,Watermarked +3685,0.5333333333333333,0.4605191256830601,-4.084201256082031,0.02651851352137615,Watermarked +3686,0.55,0.4396857923497267,-2.8054956411927168,0.06754435460228396,Watermarked +3687,0.6721311475409836,0.3416666666666667,-10.239067447514469,0.001985976142870652,Watermarked +3688,0.5901639344262295,0.42083333333333334,-5.382812644531547,0.012558941549593422,Watermarked +3689,0.5,0.42711748633879776,-1.4944177904953027,0.2319280697354033,Watermarked +3690,0.55,0.40232240437158473,-6.785424665478694,0.006543098179519793,Watermarked +3691,0.5333333333333333,0.3818989071038251,-3.1559440965062726,0.05103057602991868,Watermarked +3692,0.5737704918032787,0.375,-3.141020234264478,0.051622720218920067,Watermarked +3693,0.5333333333333333,0.33706739526411655,-12.955859792395307,0.005904832207349002,Watermarked +3694,0.7704918032786885,0.32916666666666666,-8.41751169737434,0.0035178920060083122,Watermarked +3695,0.5409836065573771,0.4541666666666666,-3.061039563134522,0.0549480767673967,Watermarked +3696,0.55,0.39822404371584696,-3.1169399358461005,0.052596564456531714,Watermarked +3697,0.6229508196721312,0.425,-4.093911885056293,0.026353313515894105,Watermarked +3698,0.6,0.41072404371584703,-16.306113587728134,0.0005018457959471869,Watermarked +3699,0.6229508196721312,0.46249999999999997,-3.08642244690056,0.05386425088654483,Watermarked +3700,0.5333333333333333,0.43155737704918035,-7.41608714028745,0.00507253495755017,Watermarked +3701,0.639344262295082,0.42916666666666664,-3.7914997183027954,0.03219017406449965,Watermarked +3702,0.55,0.4023224043715847,-5.3872290083001095,0.012530405150481858,Watermarked +3703,0.5833333333333334,0.3773224043715847,-4.640651750707308,0.018858726540020358,Watermarked +3704,0.48333333333333334,0.3860655737704918,-3.4475208334165512,0.04101062763956584,Watermarked +3705,0.6229508196721312,0.4333333333333334,-5.465357642986741,0.01203929791125337,Watermarked +3706,0.5081967213114754,0.3458333333333333,-3.3291936924471166,0.04474608942509305,Watermarked +3707,0.48333333333333334,0.37315573770491806,-2.5737901679797495,0.0822218718502907,Watermarked +3708,0.5573770491803278,0.41666666666666663,-9.248430069349528,0.0026747507004180183,Watermarked +3709,0.6333333333333333,0.3398224043715847,-7.915878353618565,0.004203089684479934,Watermarked +3710,0.6333333333333333,0.419603825136612,-4.140759720616552,0.025574700755914715,Watermarked +3711,0.6666666666666666,0.4316939890710383,-11.006210975743855,0.0016061976208647793,Watermarked +3712,0.5,0.38627049180327866,-2.911570443803633,0.06191769354930334,Watermarked +3713,0.5833333333333334,0.46427595628415297,-1.8536473961974427,0.16084088720625275,Watermarked +3714,0.5409836065573771,0.4125,-2.7434685305994506,0.07113088858607176,Watermarked +3715,0.55,0.3528688524590164,-4.518525749449755,0.02026529222371772,Watermarked +3716,0.5666666666666667,0.4687158469945355,-6.390672157378807,0.007759165768176083,Watermarked 
+3717,0.6333333333333333,0.39788251366120214,-5.975152199128738,0.009381627777611113,Watermarked +3718,0.5901639344262295,0.44583333333333336,-4.1303709900535255,0.025744775027266065,Watermarked +3719,0.6,0.32766393442622954,-15.565235673998512,0.0005762193133083165,Watermarked +3720,0.5737704918032787,0.39166666666666666,-6.397744904460955,0.007734870011183786,Watermarked +3721,0.5573770491803278,0.41666666666666663,-2.647817826363915,0.0771377198138251,Watermarked +3722,0.5833333333333334,0.43128415300546447,-3.57266399939355,0.03748105460475532,Watermarked +3723,0.5737704918032787,0.4625,-2.7495312426784695,0.07077005262877357,Watermarked +3724,0.639344262295082,0.3291666666666666,-16.78635055652876,0.00046034248285934797,Watermarked +3725,0.5409836065573771,0.37083333333333335,-3.985191937237371,0.02828153983006206,Watermarked +3726,0.6166666666666667,0.3559426229508197,-3.590780809031045,0.037002659109130365,Watermarked +3727,0.8360655737704918,0.30833333333333335,-25.163943419716063,0.00013761668634114897,Watermarked +3728,0.8360655737704918,0.30833333333333335,-14.528409507245575,0.0007070637377049956,Watermarked +3729,0.6333333333333333,0.4310109289617486,-3.9857409240546327,0.028271354653200294,Watermarked +3730,0.5245901639344263,0.2958333333333334,-5.2910842726589395,0.013171332285404133,Watermarked +3731,0.6166666666666667,0.3568306010928962,-24.813855969761136,0.00014350088541621058,Watermarked +3732,0.7704918032786885,0.45,-21.064870818149505,0.00023403567730180875,Watermarked +3733,0.55,0.38135245901639336,-5.256718568825,0.013410791921047589,Watermarked +3734,0.4666666666666667,0.39009562841530054,-2.671727253283191,0.07557911199765509,Watermarked +3735,0.5666666666666667,0.4108606557377049,-3.0138201768645576,0.05703829450534057,Watermarked +3736,0.6833333333333333,0.31509562841530053,-12.567253014018696,0.0010862702962580325,Watermarked +3737,0.5409836065573771,0.48333333333333334,-8.472825175856576,0.003451637475653648,Watermarked +3738,0.7704918032786885,0.3583333333333333,-12.770263120773526,0.001036015557970406,Watermarked +3739,0.4918032786885246,0.3708333333333333,-2.3891724073960368,0.09681354824211097,Watermarked +3740,0.7,0.4483606557377049,-12.885005111050303,0.0010089720709264307,Watermarked +3741,0.6885245901639344,0.40416666666666673,-5.830640867335859,0.010049473664175738,Watermarked +3742,0.5737704918032787,0.37083333333333335,-10.982657455064276,0.0016163522474045274,Watermarked +3743,0.5666666666666667,0.38954918032786884,-4.0760180019956564,0.02665876696412885,Watermarked +3744,0.5166666666666667,0.3447404371584699,-3.192419176892379,0.0496190039928642,Watermarked +3745,0.5,0.4438524590163935,-1.784232222780322,0.17238357843904029,Watermarked +3746,0.5,0.4143897996357013,-8.657804801663772,0.013079710332824616,Watermarked +3747,0.6065573770491803,0.3875,-5.491151239179569,0.011882700028816374,Watermarked +3748,0.6333333333333333,0.46885245901639344,-3.6820317950825556,0.03470816139558017,Watermarked +3749,0.6229508196721312,0.47500000000000003,-3.5044017326627706,0.03935606956265976,Watermarked +3750,0.7704918032786885,0.3458333333333333,-15.423254035682769,0.0005921188778650158,Watermarked +3751,0.75,0.42745901639344264,-24.49428445453386,0.00014916835502607742,Watermarked +3752,0.7540983606557377,0.4041666666666667,-11.683939490062333,0.0013469985474943292,Watermarked +3753,0.5737704918032787,0.4125,-3.7772132331663713,0.03250520782748611,Watermarked +3754,0.6885245901639344,0.3,-10.255681847231036,0.0019765537724212886,Watermarked 
+3755,0.48333333333333334,0.3360655737704918,-10.610181401233849,0.00178889887952055,Watermarked +3756,0.8360655737704918,0.3875,-18.026271996123526,0.0003723594365652728,Watermarked +3757,0.6833333333333333,0.3524590163934427,-9.375725828304732,0.002570100386249655,Watermarked +3758,0.6885245901639344,0.45833333333333337,-7.472032792203066,0.004964056821106828,Watermarked +3759,0.5,0.4308743169398907,-4.435866726830125,0.04724822608025257,Watermarked +3760,0.6557377049180327,0.4208333333333334,-5.724222068679529,0.010581602969978048,Watermarked +3761,0.6166666666666667,0.4349726775956284,-3.1471966691041255,0.05137660775019963,Watermarked +3762,0.55,0.4689207650273224,-2.927463218319362,0.0611258808814014,Watermarked +3763,0.4,0.2701502732240437,-3.40514995545028,0.042300988302641715,Watermarked +3764,0.5833333333333334,0.4566256830601093,-3.4235368770297985,0.04173478390661575,Watermarked +3765,0.55,0.4398224043715847,-10.558130916542433,0.0018149271537199203,Watermarked +3766,0.6557377049180327,0.45416666666666666,-5.7684452397766,0.010356094063164631,Watermarked +3767,0.5166666666666667,0.41454918032786886,-2.3715832376714867,0.09836303411399833,Watermarked +3768,0.55,0.3109289617486338,-6.228218140785886,0.008345917876290724,Watermarked +3769,0.5573770491803278,0.3916666666666667,-5.9956272290627375,0.00929176090610574,Watermarked +3770,0.6666666666666666,0.42745901639344264,-26.610860299950033,0.00011643687772756474,Watermarked +3771,0.5081967213114754,0.35000000000000003,-3.224205259523217,0.048428898137238975,Watermarked +3772,0.7213114754098361,0.3916666666666666,-8.7724963613773,0.0031199616023164897,Watermarked +3773,0.55,0.33128415300546443,-3.6830493742159134,0.03468361534744764,Watermarked +3774,0.5901639344262295,0.4125,-4.161173917605138,0.025244699113682725,Watermarked +3775,0.6065573770491803,0.49583333333333335,-2.859986118208274,0.0645777020694818,Watermarked +3776,0.5666666666666667,0.435724043715847,-3.1832159126292936,0.04997046721617775,Watermarked +3777,0.5901639344262295,0.4166666666666667,-8.499595555559111,0.003420163998746116,Watermarked +3778,0.5666666666666667,0.46038251366120214,-2.3871021119027693,0.09699437660783286,Watermarked +3779,0.4666666666666667,0.3898224043715847,-1.856072621295239,0.16045407134930564,Watermarked +3780,0.5245901639344263,0.3833333333333333,-3.203403812225747,0.049203590003784436,Watermarked +3781,0.6557377049180327,0.4000000000000001,-37.585613102377934,4.1428594619016965e-05,Watermarked +3782,0.6557377049180327,0.3625,-9.790972522123287,0.002264226155807123,Watermarked +3783,0.5666666666666667,0.3983606557377049,-5.666139775569049,0.010887658149858946,Watermarked +3784,0.5666666666666667,0.37698087431693994,-3.066930962984633,0.054694091557813726,Watermarked +3785,0.5737704918032787,0.4291666666666667,-3.794152560630122,0.03213210729307197,Watermarked +3786,0.5901639344262295,0.4375,-4.005635792635015,0.027905415031102557,Watermarked +3787,0.6166666666666667,0.4398907103825137,-15.578682000811366,0.0005747431214231546,Watermarked +3788,0.5333333333333333,0.4480191256830601,-6.166892456957159,0.008582542762464446,Watermarked +3789,0.6166666666666667,0.44760928961748636,-3.4952014484831047,0.039617848196352085,Watermarked +3790,0.5737704918032787,0.3541666666666667,-3.9615442295558925,0.028724785655088337,Watermarked +3791,0.8032786885245902,0.29583333333333334,-12.539145243620842,0.0010934817370295948,Watermarked +3792,0.6557377049180327,0.45416666666666666,-7.1071028723917875,0.0057315850409424066,Watermarked 
+3793,0.47540983606557374,0.42083333333333334,-6.840395759439469,0.006394156517694729,Watermarked +3794,0.6557377049180327,0.4083333333333334,-4.492763882679988,0.020579223849711902,Watermarked +3795,0.5333333333333333,0.41905737704918034,-6.292690992312831,0.008106302500641403,Watermarked +3796,0.7,0.4064890710382514,-7.456642804764441,0.004993590238372753,Watermarked +3797,0.5573770491803278,0.4083333333333334,-2.8639314065795065,0.06436927783811348,Watermarked +3798,0.6229508196721312,0.4416666666666667,-4.673530022214723,0.018501844031635108,Watermarked +3799,0.6065573770491803,0.3916666666666667,-4.31785074614014,0.02288635431540596,Watermarked +3800,0.7704918032786885,0.3125,-21.98360655737705,0.0002060384328133572,Watermarked +3801,0.6065573770491803,0.38333333333333336,-4.785407410045699,0.017351257215718095,Watermarked +3802,0.45901639344262296,0.35,-3.0834492425511746,0.05398980423292988,Watermarked +3803,0.5573770491803278,0.39166666666666666,-3.7805287666533993,0.032431745470329315,Watermarked +3804,0.6065573770491803,0.39166666666666666,-5.539911623921449,0.011593870922493347,Watermarked +3805,0.5833333333333334,0.410724043715847,-11.448148657407378,0.0014304164055433837,Watermarked +3806,0.6065573770491803,0.4291666666666667,-3.064527056875999,0.05479754804131916,Watermarked +3807,0.5666666666666667,0.4691256830601093,-4.840779946644281,0.016816135457307743,Watermarked +3808,0.5666666666666667,0.45669398907103825,-4.048874395684384,0.02713088105493987,Watermarked +3809,0.5573770491803278,0.39583333333333337,-2.771678715670956,0.0694710760166772,Watermarked +3810,0.5166666666666667,0.44836065573770495,-3.137402547907732,0.051767564979331604,Watermarked +3811,0.5409836065573771,0.35833333333333334,-9.207415437764503,0.0027096685823184735,Watermarked +3812,0.6065573770491803,0.37916666666666665,-6.7690431384831955,0.006588365191149356,Watermarked +3813,0.5833333333333334,0.43995901639344265,-6.955531695206198,0.0060963729996066736,Watermarked +3814,0.5573770491803278,0.37916666666666665,-4.17397260708643,0.02504060302241229,Watermarked +3815,0.7213114754098361,0.3875,-13.414708580029796,0.0008955843183091936,Watermarked +3816,0.6166666666666667,0.5266393442622951,-2.0100838668302545,0.13797226511673932,Watermarked +3817,0.6166666666666667,0.3726775956284153,-3.954974925804998,0.028849502427145693,Watermarked +3818,0.7049180327868853,0.44583333333333336,-6.313455744839042,0.00803105673664604,Watermarked +3819,0.6166666666666667,0.44815573770491807,-4.067460467766922,0.02680645933365517,Watermarked +3820,0.5737704918032787,0.475,-3.0602917588065157,0.054980421525545886,Watermarked +3821,0.4666666666666667,0.3775273224043716,-9.402775448335925,0.0025485643327779633,Watermarked +3822,0.6166666666666667,0.36489071038251364,-8.409157084603672,0.003528044949249387,Watermarked +3823,0.5333333333333333,0.42725409836065575,-3.221457895636802,0.04853032779203252,Watermarked +3824,0.6229508196721312,0.4625,-3.0100455180506156,0.05720966272875764,Watermarked +3825,0.55,0.385792349726776,-4.6686976061393075,0.018553745232426106,Watermarked +3826,0.6,0.37732240437158465,-5.919359446594679,0.009632363464382891,Watermarked +3827,0.5573770491803278,0.45,-2.3790961945021833,0.0976975284567096,Watermarked +3828,0.6666666666666666,0.47274590163934427,-6.685134180648596,0.006826809830237584,Watermarked +3829,0.5409836065573771,0.42500000000000004,-1.7723627686294894,0.17445150254614536,Watermarked +3830,0.5833333333333334,0.4687158469945355,-2.42952422971014,0.09336973169699041,Watermarked 
+3831,0.5666666666666667,0.44781420765027324,-2.175312723682659,0.1178663975785963,Watermarked +3832,0.5666666666666667,0.42698087431693993,-2.926838266377036,0.06115677716147604,Watermarked +3833,0.6,0.4226775956284153,-3.0320096719785434,0.05622147844106881,Watermarked +3834,0.6885245901639344,0.42500000000000004,-8.165010792168419,0.0038426628271698176,Watermarked +3835,0.7166666666666667,0.40689890710382515,-13.281769925732059,0.00092238087463812,Watermarked +3836,0.7540983606557377,0.4541666666666667,-7.998178506375221,0.004079263296996052,Watermarked +3837,0.5901639344262295,0.4083333333333333,-9.166095697580067,0.002745459142872884,Watermarked +3838,0.4666666666666667,0.3903688524590164,-2.6475959716199546,0.07715236726431632,Watermarked +3839,0.5666666666666667,0.4352459016393443,-2.217999452996241,0.11325398002800678,Watermarked +3840,0.5409836065573771,0.4,-2.7939211613370594,0.06819623625179277,Watermarked +3841,0.5833333333333334,0.3810109289617486,-3.117346730699978,0.052579921647726034,Watermarked +3842,0.55,0.39419398907103825,-3.6055517374511754,0.036618371391318225,Watermarked +3843,0.5573770491803278,0.41250000000000003,-3.8013214837489318,0.03197586070428311,Watermarked +3844,0.5901639344262295,0.41666666666666663,-5.316864560968578,0.01299535017353051,Watermarked +3845,0.7049180327868853,0.37916666666666665,-11.831027585036374,0.001298210003952447,Watermarked +3846,0.639344262295082,0.4041666666666667,-8.063231850117097,0.003984775525196651,Watermarked +3847,0.5833333333333334,0.3689890710382514,-4.439726457213278,0.021245642150683707,Watermarked +3848,0.7166666666666667,0.3025273224043716,-11.897761281046622,0.001276847670564472,Watermarked +3849,0.6229508196721312,0.44166666666666665,-4.990732440102222,0.015470903850084424,Watermarked +3850,0.5833333333333334,0.3980874316939891,-3.5674165199732903,0.03762109314075504,Watermarked +3851,0.6333333333333333,0.40273224043715844,-4.589204955202506,0.019435305052459464,Watermarked +3852,0.6166666666666667,0.3774590163934426,-13.772608526452023,0.0008284008874866314,Watermarked +3853,0.55,0.39405737704918037,-4.944621149115616,0.015869157656486683,Watermarked +3854,0.4666666666666667,0.37315573770491806,-4.038348082595869,0.027316863729051996,Watermarked +3855,0.55,0.40259562841530055,-3.6156991795445577,0.0363573206604073,Watermarked +3856,0.6557377049180327,0.4,-10.42437348156729,0.0018841578114493912,Watermarked +3857,0.5333333333333333,0.4603142076502732,-2.0946414252764485,0.1272128293571138,Watermarked +3858,0.5833333333333334,0.41905737704918034,-3.9625676262993768,0.02870541916232176,Watermarked +3859,0.7377049180327869,0.29166666666666663,-14.478449297651643,0.0007143254209499869,Watermarked +3860,0.5737704918032787,0.38749999999999996,-10.080682221460885,0.0020788624411748344,Watermarked +3861,0.6721311475409836,0.37916666666666665,-13.36742611721058,0.0009049950751945303,Watermarked +3862,0.4918032786885246,0.45,-2.0479340472449516,0.13302782647013972,Watermarked +3863,0.639344262295082,0.42916666666666664,-4.986422813713202,0.015507569745157936,Watermarked +3864,0.5,0.441712204007286,-2.327272727272727,0.14541235094098032,Watermarked +3865,0.6833333333333333,0.45997267759562843,-3.7679579839771797,0.03271140841418524,Watermarked +3866,0.6166666666666667,0.4976775956284153,-5.300050196391964,0.013109777815587411,Watermarked +3867,0.8032786885245902,0.4041666666666667,-31.928961748633885,6.751260437384004e-05,Watermarked +3868,0.43333333333333335,0.3358606557377049,-2.42291804775351,0.09392318031625656,Watermarked 
+3869,0.6333333333333333,0.4194672131147541,-3.4472491733646837,0.041018740078086716,Watermarked +3870,0.5737704918032787,0.39583333333333337,-4.526712258059946,0.02016683338787022,Watermarked +3871,0.5833333333333334,0.48101092896174863,-3.9726503315641613,0.02851551151540454,Watermarked +3872,0.6333333333333333,0.4441256830601093,-9.737517564732535,0.00230079619793927,Watermarked +3873,0.7704918032786885,0.38749999999999996,-6.709762545561513,0.006755661879342611,Watermarked +3874,0.48333333333333334,0.3202185792349727,-8.067567567567563,0.015019105066650768,Watermarked +3875,0.5666666666666667,0.4229508196721311,-4.277954683338313,0.023458887518037853,Watermarked +3876,0.5,0.4062841530054645,-2.608245612118408,0.0798056461655953,Watermarked +3877,0.6166666666666667,0.423155737704918,-3.308126758133395,0.045455844114838176,Watermarked +3878,0.8032786885245902,0.2916666666666667,-13.189427229234468,0.0009416245667257401,Watermarked +3879,0.48333333333333334,0.40239071038251367,-2.8278186890378114,0.06630888074205549,Watermarked +3880,0.55,0.4270491803278688,-2.1944780188324806,0.11576816528411227,Watermarked +3881,0.5666666666666667,0.3773224043715847,-3.575689868461351,0.03740060612655586,Watermarked +3882,0.6065573770491803,0.43333333333333335,-6.573390283759843,0.007162305426399894,Watermarked +3883,0.7377049180327869,0.425,-7.134072833363263,0.005669714559524623,Watermarked +3884,0.5573770491803278,0.39999999999999997,-5.171937592179927,0.014026340881299086,Watermarked +3885,0.55,0.4687158469945355,-2.666321218691447,0.07592808657201537,Watermarked +3886,0.5833333333333334,0.4603142076502732,-3.6716000925111048,0.03496108120386053,Watermarked +3887,0.5333333333333333,0.41045081967213115,-1.9837288411299292,0.14154345106445665,Watermarked +3888,0.6,0.4400273224043716,-4.325688830675532,0.022776003224226602,Watermarked +3889,0.5166666666666667,0.42288251366120216,-2.3707547764032237,0.09843675721882074,Watermarked +3890,0.5901639344262295,0.42500000000000004,-3.0224716644609666,0.056647945467235325,Watermarked +3891,0.639344262295082,0.3791666666666667,-5.283658664721413,0.013222597948006223,Watermarked +3892,0.5666666666666667,0.36885245901639346,-2.7563407387163426,0.07036747746122053,Watermarked +3893,0.6721311475409836,0.32499999999999996,-16.552311734273484,0.0004799737975285094,Watermarked +3894,0.5666666666666667,0.43128415300546447,-5.492433465454693,0.011874985075137982,Watermarked +3895,0.6,0.4478142076502733,-4.942348751891569,0.01588912728706134,Watermarked +3896,0.6333333333333333,0.40225409836065573,-6.568502779421056,0.007177469697378324,Watermarked +3897,0.5333333333333333,0.3775273224043716,-10.30131962224801,0.0019509748009049809,Watermarked +3898,0.55,0.4235655737704918,-2.574541708593826,0.08216821527460087,Watermarked +3899,0.5333333333333333,0.45614754098360655,-2.6559601893971574,0.07660250967292412,Watermarked +3900,0.7049180327868853,0.32916666666666666,-11.674705169820832,0.0013501421719868342,Watermarked +3901,0.5666666666666667,0.4108606557377049,-6.418429391739877,0.007664387655280491,Watermarked +3902,0.7540983606557377,0.4208333333333334,-15.206265335945545,0.0006175648734416628,Watermarked +3903,0.5737704918032787,0.3625,-3.600412240200614,0.036751502378933966,Watermarked +3904,0.7,0.419603825136612,-5.8545065009896256,0.00993497840099364,Watermarked +3905,0.6333333333333333,0.4602459016393443,-5.345769495574332,0.012801672818638945,Watermarked +3906,0.5833333333333334,0.3818989071038251,-15.322633755583363,0.0006037415969847308,Watermarked 
+3907,0.5573770491803278,0.48750000000000004,-3.044989012722217,0.055647600875460466,Watermarked +3908,0.5901639344262295,0.45,-3.5859661472271527,0.03712903167578981,Watermarked +3909,0.5666666666666667,0.34822404371584703,-4.589291871691855,0.019434311800169517,Watermarked +3910,0.47540983606557374,0.35833333333333334,-3.1680031770414296,0.050558348943150866,Watermarked +3911,0.5901639344262295,0.3916666666666666,-13.752294116926654,0.0008320313667330868,Watermarked +3912,0.5833333333333334,0.36038251366120216,-4.510639994744203,0.02036072397240279,Watermarked +3913,0.4918032786885246,0.36666666666666664,-2.214046423571898,0.1136718934082652,Watermarked +3914,0.47540983606557374,0.44166666666666665,-2.3377953522924186,0.10142500753372925,Watermarked +3915,0.6833333333333333,0.3445355191256831,-8.421776890073554,0.0035127236241960158,Watermarked +3916,0.55,0.43196721311475406,-3.1667548890956354,0.05060697458442003,Watermarked +3917,0.5666666666666667,0.4644808743169399,-2.238658218669735,0.11109977799644323,Watermarked +3918,0.5,0.3859289617486339,-4.12442499010612,0.025842773831512598,Watermarked +3919,0.6166666666666667,0.3979508196721311,-6.410996076998129,0.00768961910808135,Watermarked +3920,0.5,0.38579234972677595,-3.247114142040317,0.04759348039328008,Watermarked +3921,0.7868852459016393,0.4,-32.828301710168866,6.212646695041301e-05,Watermarked +3922,0.5737704918032787,0.4,-9.652812843530201,0.002360364565844294,Watermarked +3923,0.7213114754098361,0.33749999999999997,-13.159250585480095,0.0009480287588880445,Watermarked +3924,0.6065573770491803,0.3958333333333333,-3.9531674853921452,0.02888393838786599,Watermarked +3925,0.6065573770491803,0.38750000000000007,-15.392023937439065,0.0005956941961595111,Watermarked +3926,0.5409836065573771,0.475,-2.1418349190706736,0.12164197076622749,Watermarked +3927,0.55,0.3730874316939891,-3.9173359194644175,0.029577655575744665,Watermarked +3928,0.6885245901639344,0.3583333333333334,-6.697501094028931,0.006790960911751886,Watermarked +3929,0.5833333333333334,0.4520491803278689,-3.381086105792825,0.04305684351738099,Watermarked +3930,0.45,0.34419398907103826,-5.4455564685402775,0.012161350131513373,Watermarked +3931,0.75,0.4474726775956285,-5.428996703057984,0.012264667663581029,Watermarked +3932,0.38333333333333336,0.3279371584699453,-4.7611795122867155,0.017592360487795486,Watermarked +3933,0.5,0.39009562841530054,-7.41768759066674,0.005069388475696104,Watermarked +3934,0.5081967213114754,0.42083333333333334,-1.9061102831594638,0.1527095539473995,Watermarked +3935,0.43333333333333335,0.38586065573770495,-2.611459754304495,0.07958474323908545,Watermarked +3936,0.7666666666666667,0.39009562841530054,-41.93436311911947,2.9845043324434687e-05,Watermarked +3937,0.6,0.40218579234972673,-4.703745486314339,0.01818156231348825,Watermarked +3938,0.5901639344262295,0.375,-5.5469553767808515,0.011552909711436831,Watermarked +3939,0.5409836065573771,0.43333333333333335,-2.4127247774526692,0.09478502280607064,Watermarked +3940,0.65,0.3896174863387978,-6.0971062067672115,0.008862646607214728,Watermarked +3941,0.5245901639344263,0.4375,-2.361603838210462,0.09925556793924133,Watermarked +3942,0.7377049180327869,0.4333333333333333,-7.354109955820557,0.0051963894030398975,Watermarked +3943,0.55,0.4151639344262295,-5.271696829221992,0.013305730606527047,Watermarked +3944,0.6166666666666667,0.4810792349726776,-3.1931249674651547,0.04959217968663012,Watermarked +3945,0.5901639344262295,0.42500000000000004,-4.904097017214922,0.01623024607177803,Watermarked 
+3946,0.5,0.3939890710382513,-2.375708105544391,0.09799696855032386,Watermarked +3947,0.6,0.448292349726776,-5.984088746615719,0.00934226482517408,Watermarked +3948,0.5081967213114754,0.42083333333333334,-2.548903055279766,0.08402331367690975,Watermarked +3949,0.6065573770491803,0.37499999999999994,-6.875471004283952,0.006301448294158944,Watermarked +3950,0.6229508196721312,0.375,-8.04851009961339,0.004005903148525875,Watermarked +3951,0.6166666666666667,0.43565573770491806,-8.810845426280132,0.00308060726714541,Watermarked +3952,0.5901639344262295,0.44583333333333336,-3.786983637511313,0.03228933418227714,Watermarked +3953,0.5333333333333333,0.4066256830601093,-3.9383341934243377,0.0291685537410846,Watermarked +3954,0.65,0.4560792349726776,-5.067243916956274,0.014838352365228525,Watermarked +3955,0.6,0.41058743169398904,-4.106153946552308,0.026146933323788254,Watermarked +3956,0.6065573770491803,0.3666666666666667,-11.1491127146561,0.0015463681392427176,Watermarked +3957,0.5901639344262295,0.4,-10.563455564617959,0.0018122415508561155,Watermarked +3958,0.5409836065573771,0.3625,-5.013582256122774,0.015278375182884808,Watermarked +3959,0.6557377049180327,0.38333333333333336,-4.064948814456438,0.026850007364746683,Watermarked +3960,0.55,0.3569672131147541,-7.573495790587439,0.004775004170929797,Watermarked +3961,0.5573770491803278,0.32500000000000007,-3.590011324398241,0.03702281920404291,Watermarked +3962,0.7213114754098361,0.4666666666666667,-4.791779056643048,0.017288564339617386,Watermarked +3963,0.5666666666666667,0.4355191256830601,-2.7776540598466357,0.06912570353142605,Watermarked +3964,0.5245901639344263,0.325,-7.221443843083816,0.005475227116046089,Watermarked +3965,0.5833333333333334,0.4687841530054645,-6.1047509028321505,0.008831382943901647,Watermarked +3966,0.6229508196721312,0.4041666666666667,-12.735108311334004,0.0010444936119927254,Watermarked +3967,0.6065573770491803,0.4458333333333333,-4.993743185709583,0.015445355698632398,Watermarked +3968,0.6333333333333333,0.43920765027322406,-3.6522403422007157,0.0354367438599517,Watermarked +3969,0.6166666666666667,0.3688524590163934,-6.85232843082187,0.006362416633878472,Watermarked +3970,0.5,0.38558743169398907,-3.0561029466886156,0.05516204355542588,Watermarked +3971,0.5666666666666667,0.360724043715847,-8.874674345400543,0.0030165490767111924,Watermarked +3972,0.5333333333333333,0.38642987249544625,-3.3493937263441476,0.07875291250488248,Watermarked +3973,0.6333333333333333,0.4558743169398907,-3.7027285641405947,0.03421325208693942,Watermarked +3974,0.45901639344262296,0.35833333333333334,-2.0822726427097917,0.12872281264009697,Watermarked +3975,0.7166666666666667,0.42691256830601093,-7.141009375063902,0.005653944218972081,Watermarked +3976,0.6,0.42704918032786887,-3.8405509086761342,0.031137895819023056,Watermarked +3977,0.5333333333333333,0.44808743169398907,-2.524971074527996,0.08580160862644565,Watermarked +3978,0.6721311475409836,0.5,-4.543656514596028,0.019965007637516786,Watermarked +3979,0.6,0.5059426229508197,-2.819285590304159,0.06677779498737416,Watermarked +3980,0.6229508196721312,0.4125,-5.2754090950771,0.01327985815124129,Watermarked +3981,0.5,0.3941256830601093,-9.09710470999564,0.002806625908000589,Watermarked +3982,0.7049180327868853,0.35833333333333334,-9.378324457589393,0.002568020999171169,Watermarked +3983,0.7049180327868853,0.3125,-10.826994016721436,0.0016856521372314135,Watermarked +3984,0.6833333333333333,0.49767759562841535,-7.0718291988043145,0.005813854687167852,Watermarked 
+3985,0.5409836065573771,0.38333333333333336,-3.5333558289091114,0.0385464838592851,Watermarked +3986,0.5833333333333334,0.4810792349726776,-2.348566398200449,0.10043648507703083,Watermarked +3987,0.5666666666666667,0.4560109289617486,-2.3723659709016056,0.09829344194941916,Watermarked +3988,0.55,0.41454918032786886,-2.568978311749152,0.08256644079807948,Watermarked +3989,0.7049180327868853,0.4083333333333333,-6.76630498961627,0.006595971681668088,Watermarked +3990,0.43333333333333335,0.35252732240437157,-2.7722967072440508,0.06943525700132411,Watermarked +3991,0.6333333333333333,0.43968579234972677,-6.462196948447438,0.0075180098331947,Watermarked +3992,0.6229508196721312,0.3958333333333333,-4.368818491249194,0.02218094363638083,Watermarked +3993,0.6666666666666666,0.4734289617486339,-4.251277919668526,0.023852073711438902,Watermarked +3994,0.5737704918032787,0.43333333333333335,-20.63996275066481,0.00024870591964064987,Watermarked +3995,0.5333333333333333,0.40232240437158473,-2.8318259683509934,0.06609008772343258,Watermarked +3996,0.5666666666666667,0.410655737704918,-3.6504320440032956,0.03548159461405821,Watermarked +3997,0.55,0.44795081967213113,-2.5131484576925534,0.08669715647138371,Watermarked +3998,0.6065573770491803,0.38333333333333336,-4.2005187653190665,0.02462402343066429,Watermarked +3999,0.639344262295082,0.4041666666666667,-8.814856757089686,0.003076528811000136,Watermarked
diff --git a/papers/atharva_rasane/00_myst_template/banner.png b/papers/atharva_rasane/00_myst_template/banner.png
new file mode 100644
index 0000000000000000000000000000000000000000..e6a793bd6c7b21376946a67fcaf6d67668779f27
GIT binary patch
literal 506825
zciVj=I0&yKe~<1a(3kXTxJO|hInMBvdDIGQzg0Lw@c_Q0CJ0;vl^)0t$H?+~7wB|D zV!l%kxIiZgjCe1%rMn;eBsCW)`ayY)&CL?x#(P63)icoZ7z{nE=_(3whXRQLVdx&b zL^X2LqrDH$%-IZ{f9;9wbEB<~W90d9;-LbIk*amI(w&pwlXPDA4qzK93{l|?=ma|z z2y)x`WDLI}GsruE9*_ruTKGsKHujFS6#VO@bpFv9 z!->OB_WnKwv|w_k7hifgUA=xCFaY1+0Wi5*kB2$OfQxCnXX z%$Q?&5aPdoc$=0$J@>I=Y7x>|0ms==fKZBJX0@ugK@-SF^?YR6UGz@j8)|Q z+~8Q)AJfq6%wQWu{(xbQ{AL^$!8jc0R^_9N)6k0$GLMdYJwT7!ySGL9lc32ba7`Q$ z>>j`iY*`p6&R01WIfFdcfq2^2KA`FVnvypTNU=hUAd_a92Z|mgn8gcvOv=dtva1hJ zT8Dmk3f&y`=*LN2K#tQOnTLlf2&h%Kb=v4{aS%cWF#O=oB56KEwdTgzRzNTyXyM#E zpu3VzObn$ftGDoU(z!!rr;K48EK;+s!LAtcx?sPDyj_cXT7qBsm!$V8`5Xngc5fMP zXK(t_S6^lfq&Q&YnvN*d@ubs zzxB7dj}A8gql)XzJT2}twdXhf#;-(C&;~inW20j{hed4x%penMIy?xb%+b;(0RlsS z-(%3no)zt~AI7XFmF{eJ(oIrAc5y~b%w7n&bd0Vx53_=|_~i6lItRF}l1tjAW@HFm zqUXzFRHe;+^%8nkwW){KP69f5Ryasx0vT=U!Um@U?&9U%T?e3+hSGob*T060$Pg{O zLVbz3+}SjLf);J0EM-a48^96bk*&%TQ^^K+#|{pkVSup4qPk4b)CM!?Fp(Z}040Hb z4ImWtG*Fd=?Z?Uii3gPrLSt-vGj0puqi~vi)KKo8lqzk&Rz267yH zM@mGV_vML{O!necZyuxvRdRN93V`z>JPN<_FjAs$a3YJsF{t6do@R(%L|QB=A1Vws`<00LbI^fGJQ9B)0E&8^XSqnGVxrrqemoa9xFc04& z_+lR-!oYh=jD=le4ZvvBN?Gid%{=z{H>0fvNLu2v6^gF*kjM~;hXuHB$9M#-E>w7E zJ;WK;dKdW2zKD7r`v|-1Gl=Dqpg`o&JCF(IVMKeQ9nqd^&)#wfSNkRo;>B!&?hgSv z^szxOF%_>@@iH+Lv(~P}BeU9?OGV-$KpqG<01EeYLh11r=fPxiR`fx1$0$CPk3d~Q zHV@Fyw7|}!_a01LtuueNf)X7Y8zn&-3N+McOD~I?1aKZA5#bWXY1mtUB=c!7*o{H} zZlRl}Ma@6)81u)Q1pyo)wBocS#jESb+zi#qOr{a|jIpW7$Yri_tqf{9hn{1ZQGjgq zZvbShwj&hQdvE-h%UbEVmtW!Fh`9iK!y}x@ed8udkuW3o^&q6dL#!JfU6nQSveLOu zg?;#NP?m-$wVt%vU0c0w(YF}I9p}FTRt%lma?3o}i>k%}eHLZ1Q+nnKv3A`V8+yO*(?$A3SfK_Ojm!tqzK&ihsRBxRl_a2{{F)W7Q z%&Oqomk3Ei&sNb}RWDsQ4nun0AWgfj-L>n|n2Z~yh!5mTx>UG@_m{uv88VG1x)tDo za?J_@QykYyZ)U9O9lX$W=wJ)uXXn8(;o`f5V%O-;is6ALJIDLjehSnPuH;*(6v~#M z?XJTQdT0QAj8RZ)m^_OS<35MCnctxv#v&+3F7Pe9l0Rbp%+>Is7Qctj^q_#vS2bFr zAZNl{trC~%TUP)8KmbWZK~&LnWFGv=>OfC;97#URAiO&$P){iIOK+oIr<@i#<8sb= z;RU48PgWVgb8C40M@L4Psp42pB-Bfna`)L~v( zLZ?{@e2U}~3d(m5w~*1$K76H@+vrB$;B;DBdI){kQpY#*6(HV+VB8>pT?-FmVKWf+ ze6wCKXbACw9$x4t3hy9#_t*8P0Mz>ElUgUeLSP$4JfC&swOB93XqO(tqwc$Tu6f0J zUAvY1UAptsFXgZF6-9=~N1x~ZG031pjcf)UidDmp|09F&yz*}t&GHD>7)B2-K6GYk zhAB+G&+xR3{BhVZUkaM=;_5*&tgWG7k&EM1u}cqA7~UFrJDVKPLquqeUdS~fLM;+D zosZsUJ)RzMJ$`|1E~bL;Jdb@}#o+Cu*SkHt^>jDjZBP2Nhw(PFY>TELJo`h)U_FUU z5HD4UHd$xV^awqlnYPA2blkv5@F-h`f=!D@2haS?&n~6YbZO|J_w&vYz!}f*&@g$o z0wu=3P2uDwIhj3pYuDB`LLS87KkM7!Df18-Wk5tdIfx^JlcYSt@z{7g|NX!HBjji+ zoqOy8fS7#%fK>+AXLL`d#U4}f^%M{nc7&82tVeV?MP!}P>c^W>sGV1JDu z!fUK4wj;ERhZ^v=P5;$pyyzcYUrduvK0$sYdA%68I*;}qqB8-SGbcHYdvcicE7D1z zEj_J6c+jj;m+p#2R{`T<$*GA^oC-LoFxUqA2l?(x_wGGNSFYbo7r*>^nmT=o^IB+q zx%~)&?4+w#Zs4svjLsT*!0qHQgbbBFtmP=9*ZOht*!4l6YwIaCyw}*?s1_&((LZHlgSg1BIF`2Tw)92thi&Tjs zjsQF1bf5(b&_L2LCcUjSfi)&K{da#Qt>InY-^bXd_8`7= zSO^{&af!o0YP1*CIO)Z?m`7d?cXrbM^o?Ii&pvfBz47iHc1Q1?cTcZXW3`m+BLJDvX?I9D~ z9}PMqSn4C?m+677m2z(O#njMIS%8(mYDmh1@b?CB9NYc#|&_s?IkF(<4Cfq6jrV~cP-orH* zk~od`iMqA$VBUsD8w2|MyB_Bv%Bw0!g&_0}+s9V#q+?a=s{qYORf#IN<9uk-W#Yq`q*U0PS*)0|t^c!H5vc`C>z}ja2DrP~4pjV}P2&n5rc=eOB zW0U(MItgU)C<~StKmbL-jK}{)^(tek*&KR=j*s_GpP{Hg1SoGwa zef)_i)G$YO5200p0V3arJs6unq2s|gqC$ohN_RfHhLyjSp8Mi!oQyI8^HnK2ctFK7 zT?_~jT9~l?i0W_N(L>d##Gxm)cc2R->Vz(o4|+cJ0$SW8An#+WD!yoM2GeNNXm#51 zr~`wmegncjh(jLLw3pi7Oc_4=#YX$j33O4LR=(y{E|2+F!>apm9 zLplT}th;%<5G8ZR>PFM&pewzFdeAgX1f+)3cq8{n8jOW?@_qp}j1oU92;7sn$i>Xm~%i7i$c5#7!i~T>h-Eg z3!W&L1*w?2;rs$5@*nrWFq!%85pIj%`P>@(esYVsP#wh^G z)sY50-rLiPW>3tg58wNUNE-BA9%Lzax55y`u6I|^=9;-LIfN~5Mpp4nnnEvWQrR-a zIsmK!r-;=7|1AnqoY-BuhqOdpo;1g^4e9X%as_3D$5C))np0>K#*a2(&d4(9jL^1y zSotG!;y#9Cw}dF}p<6+=XFH6kEQ(P15}ia?N4rJv^4K>I!mw+AkFd@mzJ9v?rgAA; 
z>~R`>$9ZRXs+qgFxEdR#q*V!Ho5SjDQP{=!JOWumU!&K00zK-ksfyd7vQRMxKlgWy zJ|Gw1j=bB6VLB?%H3uVLh)^T9!4pJ9FsKAOy%Yv22Q6-r&JT$ybvW3hip<99C$dEs z0`mslKRd3lMq_~4b&dnp@P?D|WUhlla4O&=2;kR%zc`u6{=5}HQIS5aIvot3XGoRU zSXo0RPSTQ(;uYx3NR%zu?0O(SjF1Pov%Cim5SZbaLU-(~9;9oRZWA3EPiH1)0ZFuw z0(irb$bekBPdhqSe2l!{4M2dQ+%mEuat1kpvqHEPKN`xH_FwB1xdE6iy!cqUbo*+$ z^9Og+Km2>Y1MdVEvqyN$I7r9&74aS#e%-)$?xzD#X$a5t%uu>}Z-rfdm6S3-pN5~b z5Su~nEwo9ilXQP|DgD3y&!42?9J#x_7-k$-U8uCvGG1dc$k4@)(!+I*{pR2v4F&11 zL%x*;W(#`VXAPCP9;tmm(fInxGPPl*rW~Xb)6<+ncP)MI+y5#3m*4ms>C)xfsX*s} z@iR|hI3EI*DfUD!ZsWXBUk#9R>meUj*4wvT9ih(9yD9*dcdbZs|7=^QJdjooxW*!^ z1Dpv33{OArAkXUn(>5M%EDgRTR#uA!vAJv0b zxLG9tD|t^apmNdYDtnbh&>$WwR)=|@^GrX|vuxC=jw7zU0nkTpXECS;=1v6I&j@@K zhRDb)y2XQ1Ji(3xY=+Am*{d4-WW?Fp4!c+g#5x@eQkX^V))THAMA#w+m^b5g9ZcEM zh|dP@0HCQu?N-D8bS$+6yXTAY(mt8n8^ALI!@0136gF zHus~BSWy1s+|7+fR@XJ)c-9`Bg+BZ2MmRAK%^hc442bn%eVLEK&UrBag;BH&9D$GO zL?Le6SxvX@u0co43q#WVQ@`n+(1389HKIVC?#IfbqkRTHX+g6NRqz9Vimp|7cKh*) zU-}H^CwobkhX?oWpnH+ctY`OZ1ulhFjG!Bljo$C9pLyJnC+pI62uVU}ia~U|ScDG{ z&B_IIDZkrh(iGWj=IpEAY_Lggjpq#;@-|$iH_?-i@>pdgAMgNffN_ev?AS1tiyCGG z6U3t;f-k~MGJyG@p8X!ulT~+M_QACiL7oN7G1xYY#3{aMZw1RAOI5dafsHV{77NYR zb}R5;D8}Lt1X3=nfJ0BJlhqKkf1}45ODZ5iTAPsL#7P_iUy;9FMfpP&Yp$VT_V9uL zPBH=%6ov#F)#q+7zdH}p_*B3ymSBA;Kr1c1eJg3>Qwy_&~jC{(3g1@P7Z zL3KT`%vUjO!*J6A98VR+6z9AH7Ua`NcVRsg+;NH)`v-K5wa_wkuGW|e2K1Cgqj3PT z+o2~%fv$JPHa>P6Xfj3>jn=5%`S-u`W)zP-{lXX0@Bia}nEu+Y{aTu)OTyUrIQxKy zn{l}xpxFSwS;kc&y0A^Lz>nX48^MVp=Xv&6w3sfG7~#DNo+*zwEbuKDmT5eKQAXUy z8|CCYB6;ohViat8dXNps6mEu0y?1vhWZ8q? zYIwg)^=ay@VC*%*k2S7o+Q^;bor{DmhfmW zhLy|a%$nO}&URD}v;IZamq`H(**5@2hR;kzU|Sd$@28r#mx^->jUx2UUYK{+RaD3f z`C3_l(g+}GUjk!HltV3*)4y^p3HICoQOEJCw@L!)M$&~u}QlqTBR zR2dVN1{*>O6P4K3lIhJ6O(w;hy*#VIJ%p8*(ObcMPtYQ zfVCl(1er_60ol89ZC&iW@f93#|^& zIM;bmL#eMUT}!hIgVaWNylXYR|K=w+GLF+TFFj2P0g*W7&+QnxMPxJbf_1a=gN_4# z9-16rGVBMe5=Y%bPwJC6l@eKY`pMIHqw48D{Ht%#?{*h4b*9 zMuRD6{S@%gt_^v@4DDo#Qw%QqJmBV$vQ@=ti2w$)xCE5(ff<3F>!)qG2a}1;2S6W55HZ$VB;bkNiejOdhZGvBoLyQLY8plRuH+Aynj9 z`Ai_^GvMn2Kes82YFY5hx}w-=<%nYh{c8cecqL>u@+=S}{Afza zJwdQ~TH{hjWFI|j_cqok*ra_R4yQx*U|;>_x&tnI;60rrYzFV*UsE+$Oa23Y0`M{U zuHU$yc1(oURnEI$Dt>05F08-1UOW$v3I8I0F%}4-q))FdrQ2)lCy_>LG|cz6=E&#v z6U8?r!y};e9WeC3Cm!^o-fM}aNH4$xlLkmrDA1-f(VrjUNJ~r51;RgrGsgoub}*9a z@K_y;5Jhcztod9oFQgZ;$BYEi3deBP_N(iW$);?^k>}u1GzcUH80Y#t&a7N?E`X4= z$SPP19ur(*5$)Iz6GquXeX(dcEeVMaVzKWX8lQ!Mkt9^UJ@!wVITlH!5K0bZVon^w z%1LDrN`g<&2MPYVxw)4`)k^eV1*;wkLkWiJbYELsrB$P!O#l~#V_esQiYlZLSh3pD z+sed{Fg)7=20c9xi68=D&uAfFfEqs-F6m|QT{RVo9<@#fj{sNcEiuF?XmgS6lV>=( zNr8_F$1}RaTg>14M#(XQ_>Sp!7@AZ82oeNHrggA#>@C~R-em!RGC?%C=a2G;o_T?@_qx@-zX9mAg_nlaOMmXQ^z2Ko zq&NT5chY-5{6Tu=g_qM``}*HX=kb`-DO!WXqfLpuLS1_g0&%+x(J980SK0}zT10vJCk@+LQQuvP_x%ELT#l85O%rqlEr*ITdr+6o~>3^|RIefrqCKN>g# z2$Wp}m7t}F7g6J{2EFoeFqnB)=A>GjC_O3gU?SFxzad1;fe+yYy|`9($}lt?U{}G1 zB#`c;XFi6(1(HmL_sNUi+eU*4))an0tK7ph(g;d;A5RyBzkoXEo(AY7?y(T84|4)_ zLZf)wxp!}if=&u{97GYzIgcXs2;d&Zn`7Q3#&8Y;Y3b@mfXKVNhlg^X?UITrE+@>F z9Uel;@LY=;+n{1Ui9%PN9l~1V;Er>j`E(v_97P@cTjQBdRJu~Os00mN+X~9wvPJU6 zuoe*52u}!4LrjalAN1fJ*vr=_fq-&-{SkZyz+-Kd0Wd7@)%T%y%OP})Tt}k^MS2a7 zI{$W9Z&y6l79|s#S9&P{xGfo$TXR0*p8SS%0SLX5y zM%-(A824jvlj@8-C+81u`w<3>z&6@I0@lMr#q)bjAK=-h5149k3~@FnJj=3Yu7>^B z$B4ukMj!1RO*1G#J;4Wr<~_QSCxuyTe!8L_d0uE zFU?+L(B{S-(V7YR|Bi*{aaa0hp*s{uo2C{AF5qG52LN*hx1TU&j9BEt3$I^HQydgx z75A+hH_44XjN&mJ64HaMA2Ct2ZD1kx3q@7HBR`6PndxPI@OdfcDV_p5dYj`(wv$AT zV_i>QTuA@&-~79@_Vcf%zx=hYAV;?_oJg5iUrDpG)9LidQI3|qk>32?_vkgglTMvl zV2^A;hOA@;o?6$za2RlE%8!l>4Y4qEpht6bhdC`2=?6`ipNt{Gn8Q$k z&&%lZNR@K7>Q(3kw-A68{sl;B42Pf=Q@{(#AN8mFEZ_{;1MiSSfUF~>M%tj*P)5;7 
zr|$3O`x@orI{DB01jzgY14+>Q&Cg8pV87APTZ=333D03oeXdMmjRVS!;yH`+oGX|F z*^|Lz-$MQw-6~<&WTOz9cgL&1CcHzwHMK(-X)oj!eVNTQRxU_WIudlEm`~rWX;GHI zGrYH39FN}OdFr7i``e8MqL2BT`lCWrQcR)ap`VT$`*e?}Fs51Zde@=x1Nguc&M*j6 zn(PCcz|H(!`OJGgqTMJSRwDCt@ciQ*AYTxK2o!M?KdZZN(gj193(b2mxA3`=XC6LQyp1<*t3?5<(mTEG;0zuPQnLMK6ueRfGy_ zRa;arMui|KpEqcB6$$6vHco%A3cf(*&1r;+V!ybzW5h@5f2f)N12|@rftclbuS|V z>0uUnS`w=2M`+iuI3IiJ$@Jsze;?3EG2JnQjR0|M>pc4i0qEMWB@hkjJoZ|7pfUsS zVdQA^H1!fdt=%*Y=$+3!rc$LZ+-X4UIbv)2@+3g3l(lYq@$x7`Y&qsJ#ICY^Yrpmz zzrlIfXE`ssCp{wd;Q>l*?Ch!ZoBzY_u@>~?qerDx!S1`n>AGm4c>HXH!8JZmh7yYZ z=;5$iLK7M~GCaxN2qt7k`{1g?vM4{D2v(1qSAB#fuLoC;_8>xj0E^V{$%)Am>D=QN zDZV>MlT#zv7wPXCS9|~p3G5%#;eO6atu?Bnw3$m7aOL!00=}p;PS;pW*Xyesj zu@OoLYv!QPls?i3e-nspl2#*sXtdg!7Q@EEybdWO<{5UsIPhg-lRS3#PzHoo{3iq& zvQL3)3mHg^fjCfTnV$vp0zpBV{AS8cw@)ntR+t7KA+Rw9L9_u{os7Bp?n$$5o-}Kt zK#QxphDHybyf{>Wxkn2?cpz7>QvkVy7<@)TmY0_C>PyqSN4j==(4BpX?K0yO6teQL zlTkhy4h=nIibcel%_pu??P+)i}0KypuMv6 zs5adPRD`^Q_u~cPO?sp3tsV1M2Eq^a#T86?w5lB$qTQiO%(%NWuJ^T@N}%V)aHGtl z(PeeF(YCs26@2AcS9d_35CnLbipF_t8%q)3gpY+#4pX4)R(M@_p1Iq+ex;!R$>T+X zU${pFn}vp&MCRIhQUu5ls7IaJ2fBxKFo&Nv^I&8-#&RFDC$N+^5BOV~=$gKdIv$z~ z{N6+HRvQ^wpfkV_d3ufQha4PJN%tv8xqI(nDv~R|$9N_u>6c6~S(WE(W5j1{63ZD?_r!xopukthZbB!Ln zle5U&Au_!&zWX`6rHv=qR#&dI(X~iXgSRJ132Pm&7tGD>4pOM8AQ}-Z(#JfI$Mg$N zFWlRT_-C|8d-3OANI!V<{dDWjgY>&!|C=G3`sk?Apl|XVDHwn2zxws4tna5kHIf0R z$40E>5&LU51?5$=@{oqzQ8v`3VykCcl8yv24D zaOHmWm%o}m{K-$~v~veq*hx>m_-s0RW`QmR$Xay2Al6oF7Wnxt=_re%%r!kC$6F)J z)ELt_TpMc`^uDX>f~0J56M7xq5mfPc5G%4tK0;yfJ35x}da!|~y)v-K%~Usaz!X_z zKm*iRGlk(F8Tnt|k7fRI!4}w?KHFX2jjqsH7^hcX73^y+7a4;NE)Vcjke|ABFltcKo0joC$WcuMdXOG&SKE$vc&UI>=@wui@dQNp%EG}RzKeKGTv}}7ucP+40SFs=6XaT^o7lCFNQ_oZr?;pF$hlGuLXruCAEoesKI&d-&RE2)K z%K9w-F^p7VQ2s&Btp!_urg69&>g6*Z;RF!g0&EDo1cT`5G1EnlPMc?#rx)$Pc$NhX z7NkYN2KSj4WBUpLuhs8@JOw=BmglQfcn|AiD~cS-L`zGrV~#P$F?4LQJ=!E7LttS+ zKxldoT}KH{U?V@;+9%ppp%Cq);Luh04Hmk9)uw>gZ6BQ$NI?(~+SaPWc=XgALOkY? z6j2;~?OxiwkZ0vR5U6=?hP_Qg(2E`FhYi`sI*Sc~FiH^J>fPnEx_mcHFDyj<8V}}q zRFfj$owtKFiYs0?Mqq9sJjZDHBk;4sz!WXDCOF3L5OAkgNsw%Qj~+1rKoqYqH=A1r z-@)7#q1o{wgQCfBk1eQJdT0)?4#U<%;kl2lmAP*S0i;bZq^t|W3fT8)ccVwso`&b2 zeKtM+;ul#f^62pH+@sg!5(iXNh7p?Vj}68P=3@gyY+GX28Nq=J8p-$2h2C>{t_JhM z7v1gPdHoQ!=>n||_d!R|R}}D&N2$6eY&M3QK!x{^xSxk^UU=aJp-Re0aaKCY|V6V};|MDCkq+!3*)sV05g1o&#)vy%4~>!zfFZ;)V7; z3ML4EKbjlGKDIeg4MY*Ns)DI|x|AEJFoQ4hyw99xc_NC(95mIpAO_YOzCXa5%@`m@ zWjwb62z{x>q0?q&dMc!Uo*MkFSFOOeMrnynGNnS66i}MppqIMo8X&Nh55dn|r_zY( zWPuwmtF!m|bR3fZagBH^e+OI=nzqm6_|$Y-xwnEmC4UZqYG{z*AWs+#d*PVF;|`G+ z60HD&km9@)`sdj_{L2*-{%{I25e5eGKoDVdzaZ1fde?6VBWn{QS5PldYctMRVjh5g zfY$`(T*`Wdr^ZDX55VibUh2}j>LW3`Qdou?;8!FK{V|B9>{5*tVN~cC1SSbE( zUsV;8;bvu(_5iYa)b@z}NS~%A>9svvZ1 z-h=nENoRv)6(5E@z;$qRim-Gcz5n*dsdi#KJu^Q<_l#=#m>Pr(q2tCDijUN#`v9)h zJ$i3bp*>9DtC15UeZ9QLPC4c^&Yw7$PRz`Z^ULw;^nBX{RIE{Orqb=jHEf%+f!Pt{ z0XhbdXI^I%iZVZsLmnU`zsvQuOfu5rQM=|sD>IBF4&qEPmCKVr^F>0b$WX)24dkLg zUk|4R(*x!QuLHhWzh1g47{$`tkNKK*4tCQQzI>6BwQi<=@=yPd^xMDrH`5rYH9bT@ zAMGK#Nc*Z`Snjtr$X%R>efE%w3i-?7c<2z~MfTYR$rIDEbiL>S&}@;;Hjd1f7wt^c zKUko<1KmfI!|=EIs)4hofwiG=73oGA!T^XGo<05;0P6dH{6<>({6_lS|MnZmN9q~? 
z&j_FN((BK22@y%sO5~AOUwS_6K_gdh-DJGTR%pl~#>yZ?)>^yFRXbC!nnqt%u#Ap~ zqC4o{=-w82t_6%$-{WB#wy4vIrCbjAA<(4*5bTKTLQVw(X=K4e$UOYRe5TOhcV!}< zD40`|@kg#H>jT@e4vvlYur3u05AjKk1CBUbDR@Yu)js->CgyN>ub%T^^0uG6 z_(b~ngX;{5Xq%!CxuX2n<1KAUr_zwvBvK<;D~;i(gWy@}43Bg-s%9R$1%gHZ#SjX~ z4m6a(*;QS+0R}jg_LOBjUqd$wW_j_|$8n-JS^m~ScFedU#nY6t$wJ2>49UfLbQ)f2F%fb910|gHr46Fyj?h@{S zo+rJG1-w6pd~S#^gMvGM{_*sczx1D{=l|R<&@Kf}F#u)pK4ECqyNO`D|8OIc-&b$k z4Uf4L*Srr zpqub$X!U5NG7i1U7XaG}q)a^Vr9Yp>&ppY}(UT}A`ayCcSz~lMJpkm|d`Q8+K}B?a zYBXK`$)$Am{0YD}LIxw{`sL5rPrM53H;W930j{JKXisSvey^VrQB>ALH|A!A@RR0( zwzNL=RzpwFvh=2U{FEt0;6v3K68P`q^}-wJll9>ddSPNq2JHaFSI{1(3plx*F)b0?@k<&;EVi5 z?rX9*^1TLTc(LUv)`{C0B4#r0v?WqMd}HqW1|j6q0J&pISiv0YY@b!fYyRyqoe=D% zaLhGU;v4zVtiUQ{EXV)`mZcJ0?*r}|z&~;iAza_#SaW2ZN5zJ+V!dplZ#x@hSBv7m z47{U<*`BJ(2irPnJh)*2Fk^k7Kj;yrL*}BGP=v)YV+G8#b~g}*@V*8pw?!Hh^yK(e z)y4r^@Zg*^K_Q5Z^dgl~(enu*cz;GZgdq^Dlq@em19Ix;9;S@Gx=n1#Gf zqG%kFMUV7>^!A^=mp*#${q&pP_%9*_qDjQE+CEGlevC1~b3KKv3ZM0%-vsk)ASc#w z&;f?Uo_WUTjs{VoUgpcEp|iqRMCSOuNQ;A}$^bHre(O)l8Y1w=A&w4DdJ~{-Rx|1Q zZ(d97p(z{{@CoY>0F|{EAl0R{f#)Ax>sssZ5bRoip{!Ikxi`vN>DpA6@YWy$5Vj$w zl;iQd7$uW;v-vpEU62RIf?bS;5_+`-5Q?^kuD@Lhps5b~T7+NhaL|f2aWPw-iSdOu z(Q^mFz=lq(H}WuwB94b5PL?PL6%X+x?y#WSh^4hU z{?^F1ZUO4YutlFE6Vx4$O{}94Pj$3Lr=v1$!hC(@a>;ZIF1EMwu!bgj#9~pGqrkH( z74pWHR<-0quf!>L&^Qa~aB*6qLx~0h0Qe0ega_j@_B1@c>>r2~-vN#(jRW=v?@P!& zC&CcGD9}HApuyQ4IFDkio}z0+t3qMg5PgpKF257q85-yg>!zfEV|1CdAsR(xj8D6O z_~rVzP8#agS(y)Ss)XySL&+K}s3=m`j5d0xj1Il3dk4na#gI0w#zOfVd%*!W+NOLd zxaBr~fuTk1=CbhEu$H404=;wR`Mf(YeNQbzRS+uDmEX8oQ;fyH@7A3(e?kq1;K=aO z3JWW+X&f;@Cfmb8sOTr>P7v;2<82T%i)}H69x*+;;ep}V7Dt3~VB;;s(9_X4#-pL- z5LAJi8$xx`M+gt|&h%P19s#o+HIZ>BZj`a!4L#3#sm()+C18qzY&0<~u4f8yATUBN1<|aC40h-=Q8g#=9 zf|g+fxvfuD?k`6>v)(hLZ$R_{jA6SpVD&37Y&%m&$awy{`N@vm{<9LdLPtPurJ_e8 zpIA7Vu73VGf=s|n`w$pP<>TpAByU4!U0|U{(MnRiY~~kPJmI$>T*JVo(;ckeW2`RQ z%?xnTOdt$Q#a&V>j<|1lVm7_-_|xek#{{oFSjU=N=bZPU^l%3zqL63{uSk(K9HNkJ z?#YuWi*i|J4xCpNd9@`3NDlx5SQNBqo>Z0fbB%@&yt}OH^5Wg}*5xbd+UHl&%{zDF z{A`QJtW?!1tq}5_rUcyM=7gbh4`Ya3L9qabs~q>}Lb8oA|D^K{-| zUG`buZPFd`DDaW9oZpSoo*$S=UwZwu^us^=!=Uee4m+^rnm{3F*NtPuu9e;u^X8&| zq>KpN1kiwK%mF$4!{Sn;{MnV6F)^d+IWx(&}tS!)KHRUjh^sA01u!-zF_Qr zcUX&T79)Y5EzD{#zxash<>5g{oe_2gPlN@b32uNc_#6RedIEQL6_m2W>!EuEyJ5H> zaQ@~xXrU{RG@b&Gaj}BSLRF7Pl?mPZNbBI{`I{S=L=Wk`+6D<#8vDgC^M9+Q$S*Tp_7*!-S#(?>>I0snn9YTC$LrCQT zZZm+Es1u0!v&YC+y=JD**q7K85XTPLx~yHcW8TUrja!x`T0!+;)K+ZkV@d*R*hBo% zP`wIKkZaMJx$7E+YA5gadF(tg6-mx`T}MOa7T=nlBmYaCo_ZzeOr4%2HDH6^$(EsW zgN3gh`hf1z`9ozh186GO$+Hza9MX%FXi1T9dY$|Cq0h?fiPXHaf`^^-EkfxTz*~e= zbr;gnp$wlNAZrf+kVT9S3xfv$&F9(s33zW4C%_Rjv4T?H0#I7>pkoDy7RS){5Uw?9 z#>n~RXMnK9Yl`tlv`Whpkun{rtx?0822V;I@V^DydWZF*dfdDzPX&N;6lrEj3&7E{ zPwHB9yWpBKYkPz;-Nrj@F)!Vk(gDxzJ*79kNI~92n!D#kDO4L2~f-DAa8tt4c(-I##wL zbM&}nD+3|(J&Zs|*vcG`lF%k!IZIL6C+P}ZaQ^N;_^pstrdECU?g#1S&0Dmzychu2 z)D^{93q2CfAokV}^J5FA(t}42(&EycbP^A{-IIthh2h|l=pNJTcv}&kU@aNjA;4Lg z7M@eyHN+`<(s+YDu@;OeL)7^ZR)&TVApDG++FWOEkiqPSsbf_YGGh{e+mfrpdX%AC z^X3~EWV{@JW<-l;;R*jUVCL^mhIc9N3INgu=@r4t6anT4f5+H7&e&_w;|M-v0t~Re z&A})XT;hyMC;8hdX(z0Oc%bQ+`yeK+7dOeD2o$}k_t$NkQElnCoSvaW}9Ifgev zz%mvXgLf-0_?JB)p9CY2d(a8-Sv-SB`ViK}Ldv)nUe|!vl_=iOsh6h)YG4qVaP5;Q zN;Pt4R6@BEMe*=(IFQt(Mx~(1vc-}Xg`qRsfE6GtTgB9IOS*(%>-(jJx8aJ$3i>rH}CUD)w4$k_#SH)cno=^%6?*ta-gOO}vmLJgb6ELy0~^0xl9F zX$Y*aYDOofvC3IL2!5A}M4#t5(2-Nj#(9F&Af5}s`lAOAgAty8`4t3bBdtE9h=UE* zD%qz{!&YE=Wyi93MhFGf@|+yS{AvJZ+r*gPW*)H~#1htKOXh%4W=@<)_wFyTwu19^ zG>X^Lqs8p9FyR0frgjK4*qQ+LTeP-PQ$(?v@`HWrWuL8dx3FfNaAuV`oO$Z0^!!U- zpzRJs4Pd=N%OLx3ZsSdPczBC^JX-V+DmHvrMyVYlsCKV!r2({Nukj$(y^j*(H9wE 
zP0q0ZSY=p4C>&7HJkp0SK@kqJcXe``JkvYJ$wX(KIGf6&fG1CjL$Ft+AHu9{_P3W1 z?I%CElBP-nTz4h_z9CxR62e&U+4bm2w|e{R5olOndeu8AbWkugSYUx)c>gdY1Ux~g z2nS_NkNg9;G|v|~gb+RmP$qCvfEuzkoy2{&hqUi&vX-qQ`b$HDq96b52s{XtT3ENg z$~`D^^W*GI>;KrUh?r#Ye0yS#|FiXeVkah?)yqNr5FB{v=I}tpBAMS|* zBajb8=s+P=W&9e%j!XkJzJjQn4s$|@DF`hHGquFfD+7fOP9o?$*lV=Tr%$G|@*6J-kzHuXtwWX4{w{LN^l z=ukz`hnJ&;x3)|Ot`|PZ6W-gsy&EzsQc6%Fb;En%WdMM4ub@ezcmNNOdt4)@+fbr; zVus_S1Jy~LwQ?|qCv$*3Pc`$fj7TLyLFy%L+qQ}48ToO%(M}4WBT?w#am^~Gu1H%y z{aXj+Wd{wT1A=m+Wg*+Rwt?}8iiqchcaG%^Z@=;tS(2yyoDrOE0gU6t;AVFP#L{EX zAM0;1ng+F@RzZ_#4YvJSU4F##@SH$jV;m1`XO$}Q&yeaa9eXUA?L#*2a5B;+XA0N0 z?EoW)9zceTrJ)%@ojVWH{d;Q>A)Go%6bZn4i1BE7t6h%aiiaL0_?xk;w8zN0ZzMJ?Lzju}GS6 zF0ZVl$NJ910T{}K3Lx{CdDk#d^yo%M0d#_ZSzRP8durZ3Ym$kuu4y2G4vb{!+}0Q9 z+yIo&LjBPiZf<4-ScU7LH|Z3>!~FHwYml@6O88#jd5m4s(6F?44r66alqt$R*GuoD zg`P!dYoAmT21P~^MggB`5~%zAC(s1^3q!Gx{+;pg9;1j@9VtIEm6{fq%iRKKbcr5h zqubHT9Q;JcI{>goq@Y#jM5SHMj= z<0^fh0Z?5JuF-Yp!kp2FsZIxxCUpuLPiz`X0+8qV?!U6kh5;?)u&sn?vjBbL1@>AB z^uw!zjEi+KwT^M*&=0m045I`(?r;L;AVu~^w?88qj{ag@!s!5=>A(3gvjFW16Bo;wr2_R-|&xv zW@3iFEmAWqSW_ABj+zP%;);SE296w~Dwjhim|$2qp%~d{p2?_K2n3bSMrqnsDT*gl z_`KUAI#ps?M7HE6}fGTG%)2Sg@B6I!oWI8!W`p%+l4782^&0p%^YV*JPLe!1gEJAU|h-@QL0C zY4i}`sG(v9iqWyj^!cZk$c-b0kH? zYc+$nWjBpu)PD5lhard|(zJVAN|Pri$Tb8wU|`fU<+QTALG>`lc48PPM&oRFJiHI? zuB4L)Z*q6o69haV=sJq3jB+iO==8u^K13-#qG|YJ7cZtCe(PI=yb%u=Ww0dAH{8j- zLW2S@y%rY6WfAld0gxtPK=;zHtIftO_RO$vISdj7)i~gO<=PGCj3(uTkxPVf1lC1P z!?Kmz5Pdf_js`F!p2qw2&wu}a21w@8{A<6IRsl`sm}Qxx?QiU@Y;HW89Q)u=%VoyZ z?akP2SECSZVx`N|%77xzM)I+T3M>j`ph-P??zhT2a>$?|Jr1HiRuA?DWSY+k9hpma zT-_!`2O#R{CO!v%9=WSO1u=ROZD79U=!D1V&;zQIJ3EB#;5$4kF1hNr@s? zb~(Fq&-8uYS9MiaS9Ry}d1?pgWwWQds(!!s_q@;Xea;sR!!2N9p3OQ?BcySL zI$-q}z$mylG;`41+ynJSoQ%gF7&TLGHRu$Mv?1$pF1P!8Ms~)ANx`ps*MfmCEvPQ@ z(2N%5bMg*ogpzNgIub zhB<1*r91je2dj%N`_Oh=uYgQD!q0iSISg5bXI^J&iP;CEpyA7~KhnTD*GOrzmnTFD z2YX6nNN8?mYftHj-^N%LjH;7qsSu?lmJ|+<4tOYr;KQ@T1Yh>kdfAiokb51#14YKi zVF^Mvy?ysVnn&kRK$fmgPLRETp41n%_5+N>J@BD6MjkL*QYXu25Jx`hrZ)J1bhaSX z2BR*X&r|N4Lg6T`{&96OIv%^m=3on<1a!k6#L!x@M%0&*g&sgq0cWudAlK37v1V$3+4S0v-Xx{HmA?6FUt>L#+^~@cT-Ro@Q;fwS zbDXs?@_}=BGk}3QueG5=oM@my%S`~Pe$FMG=}?_os4Zo5400OtP*_iG&yHs3k)q; ztja{{B50K%OPq{CL(%Ux5{wPIiD)}jh-p_gD5p@JvTnOq}-oSYeX^?qOo1cLXOLE?3tG8GPMjyhMQ z?v#unkHZ(GXWYw7w7TBRh`P&5JMl<+y0)~QR%n`RWRWT8p{$(oUMPX1fSu-6ye+T_ z4+cprPvlVlX5{UCU(T7p?2~rKM`%Gu+gR8*SWmr_4lQrf zL)1+P2@ZIH;8NA@HLjWGw6&jqJ#q=-!&HK@;dSaOZ@)3*j2Jl8Jdo$L-QNKp- zZ59^XsN=*o0%!QXg9%EOH0&V%j$bQl`v6%$roW{j;JdGcPTQGHCtItKza>g))Q{o3 z1CGn!qAWVbz^Klgx3@eMe}`L{VZ`$AYyw<2$ns3fgAg+AVb)G(pe6MV+8}Db0XCRx zw6dNP99W1B?crBS{G2M(B$mR)0xbhj!N|af)3%BL06+jqL_t(pn7#*s8a0TKBA?Wn zS)1np3~sD-7u*`5_D#~S31U=m%9g-bkZ5Yp8Z8B0`h>Pp@$-CwVu4Q7&#?g~q(yUXGz@sa z=5G`AD8NJ=M1BR++tjo)E5dvBxMPP%P@i#jo_54{z(8dOtf${P5i{JPLb)#MB3GsWdozmFP-4Wer2A{W8(9j^1>4mgqON!)6Jx&~<%zj#QH~6i1^e<_NIR*)z;( zbd20VSnJ%cM3nKEjDv?76gh;VLUYOvb-ddftJJvBoCCnjW8JQd--u}M)T=Kt6Y?ni z@P~hz?%chLk;Op+RI=;vBzyqCE)1}ybCYC>0F(&*=;h1spOI;rtWXB)^Q$~>DF*iM z6A3HWwOeSS>Rtjx);ks5D6#sXR$tTJUt=|s-j z*)KBI6db6<<*1a$(6J;o}*OB3hLr>%t*^hqnVV3ZZ0NS!=lWLSsO z$Wj<5bpm|b1n1m~H6xzve^Vc5BI6#s!z_*C1m$si3eEsn>JaZAL*Ow&js;Y|DXTf; z*Zs9FOs7p?p5ggT<7WZBk-5Pz81Z#diG={o^eVLHwpF^}rQT_*5ngb7=tO!JeRV{0)EOJs**9uIY3Roq*X9mPNuZ9C`d(|M z-J)%t$uxW)PT9odc$DgR%&TQ9&ZFH2%$#!M=?O>Z9&s;?QiW%*PTcWNgX0rv_5Nc( z3jhS##!oJMVqK>rqPR|#Xy7dlgxI7OwV(2UC5%~ibRd-}kFtKZ1>zBjm6RWX1_f8`QEh ze2okZB^&Um{499n_UY2-Sem|jKP^9eoPL`!2B$0;1=RlZU=Iq=_XLO82&!2E0kme# z#w-Du2M4&%r(|Qe0pLQt(2^!c#v-FKv$(`u-l2$kTYAu}X~$vX-pWKoCb^*Y$okz5 z=q4a(XB~wqd#$V?!!cKpCvy(*88$0=bl?*kmRyrEQWbDgCX-_-L5sl|p3wVf!!7>^ 
z+2?l_*5o6e!xIl{@|mb-U^FIy1ZcG(p9w`#K(7oMt>xY*FZk0aCf8scuCMvc>o^(# z2GCiNDkE4Ghzy?1VOvi3)o>}Tj%6fJ_0eGDV$KA+nVwSl0d^4P3dQ$sS`r@7kUTTg}3AIFK8Q1zSxWBuLvHAc4nlZ%M(Khc1K$Q7e*vszC^z!ZUY@hAm&P#|zR;K$~e zS%=3xC+E0QX6G1BR`8-VJ zq=_TImB84DQ!kNRmE6NeGm$#)8y};wV?EA6KPD~hiEcxK1C$J`M^r`+rqhTC5f4uk zGm;BSMH#~Y3eY;W);zRzk65`iQhP)(yq9&nB~zn-;kDUC6cf*=%gndWrUTGTBZZ04>_8Nv9m-BIkcK1Y&tabmoYHcSa!MkLczs8dkd(P+C?EFG4sr+9 z7by|gUf)2`<|6`?1q2#>9^v4Q3l}b=-5XD(2e&><|G{tlF3niOcxZ!Z^3z{RIh0Pr zbpr@9WEPB+%`L8>voKPW85}`Bqv}OwQMNMz%@c4MfaU{?$R5pcD(NH2aWv}f%%}Ab zjgI00jW(Pbb@$_R42>ryE~GxZd?UWyFr~x8t4t($clv&spW%RyyLT{fGzAV!rUp*M zwXc0Goxgkqx4oQR`)B`@bogkx@a3J~YODL?45u96ScSZxo>A@C* z*mrS^034*gJo~+cwT5RbFySn90be^&=mK+23BWNXRKTEVu-^Xa(%JnP}T0$fHX)z{d~eNxBpmRXS=CLCx0jFMYQW`t*-W7>_h zMGd*v^53jcP#!5%Xl=w-{lEgCvw$C6$|ju>RJeS+#vwbv z^4B;uvU$uj&&RWqx(lAMiOw0)5G5itg|QAAohU(IvP%Oedvks>BASki(m3br==S-U zf>46D>|to5t~ARg?C$}baEJx=>L8D0*CACedYpWVW67Yu!4Yj$D`0P5-sj$)l6S;? zbz=OIha%VkU>Tscn7CZp1hloIJ6AV|&I%AIsp=gkka2e*b@5E@fsJh)G%e~ZeBsp> z(%Y}!O@HHW|6cm--~0wCXPkEA#>|7|6*+n$VnPu2*Aa?+14vo=VGyB66s<)=4@fYQ zs?%pA)>FcU$q)^XjHGVXbNj6in2hn0^f&(2f0DL0x7Zhsw`J^QCV5&N>0bD(2(5Bt z0eGwc7+y~srD-qoTpnp_C%lb_Slfs++YdWFJQ@>I9!<{@bifv)H`~ykak>?Iy+EJ_~U&^$@Fo z*K-6A@*s$KFjOqm?HPD&>V_kz)<&woJBAIz$s6~H_il1$7@_%1;X0u zo>Hl|LOBkF_Posd#W-ROY;1ff^y*fCnjci}3*74K+B{xkza4XDeX#bhi*bBL=G(D@ zt$TnvgM;!K&t>4m0FmV#ei_Y<5;OG^-|oM`JDqZaUw&EI;QIxPg6<6ZChF{c4($J# zx&)9}v5aAb3?I=DN1~ON@1&t?pJPwIIw=v5R%9yVfv{aXoAM!6HTvyTD8CGpTGlC+Q%~=4 zn&9y8Ai7Gb!8h1Zw_GsaU>}ZoCN%*bERh;KnA0kXTZPOQFf%-n=GdF&0v+$*co9eK zXaMU<2i%MKiMo~`ke(R|B?p75xQ+k1DXg4tuZ^KYC^ak!*$E&|pi?9*yRo)P6o*t3 z3d|<6E~XHK;z1ZxRz^29445mJ8c+#C>9wj@RAe@nn2K~Bu)g*7dX5Wnm3pLMNKtUsw^}e?Ww^ExpXMYBg_}qM3MeAo)*~SpmIvfe z-YOQG?5uaQ{6Ik-CTjWc&OJP7(ta1tr2=5{2*KC*8vW}gYH3-7>15AecebIYq-~R0 z_e}8t434L#q=w<2oSI4#!y{>ab|xkEcnqMsze}0G{A_yg)(=y8b2XJFh;A1L()i^o)E%Kf zdPdU3mF_r-({!>mU{wx;C~@rI-Wo?~K7K$+2!qE_&I+)NEj7+7FXTvll4?6-y`2bY zFDdK!<@?Z{F+y9b2w5-n#uw9A_By%@C#<@greA+89nRcO4`@~^1EfnhBTqg1Z2I)i ze>Dx>_-ydxB^vQ&9^Qk#To;g+A*EU*b+V5~dql+TtP8LPEqX?Y3_*YStb=>k1P#;z z^QSOaHkD~avUr^xlrppc>^kA;V($`048uV5eg{X%!u=8{pd1R<2D+bhGB%_wZhfZ5cz2P$2Y- zDIzuB)-g&`kF}qGHNdnr!U#-w)eRy=RhrbQ8}li%g0l;c8{M^8uLI-OfkHom=OQx0 zzfofV3JfWF!U&vE5Nq4)C~rVFf|Tf3GeK}13?%rKX4dnLjE-|4#y!?eSwP=l@TQ); zwX6-+Wb;@=Q~+y~g5$vX;QZvd!olVlBFf1H!$zjlHyI!P2wcZPNdw0*`SbH3)f9 zrp}et=p9lwsl9MqnHi(dcj!l>mO2J8sGTR{)~sFGH5ChpqNGOya=2@K9@!zll~K|h zU}J;YSj#D_!S-aw13;!zEvnqhNU_sCv^*Y5e~Nx?MUQvkq;0~peSJe58P2Jli;t+~ z-H6OY57~=Ni>ozYfE5=Ur(mJK5|N^ z<83zr$F)2rSo*K8Qg@8kMFWoN z%zY^Xt~vpmX1_Xd!lT&|dRYddGiC#InN!T1Hgv@A9);>;3~hnk`==OwPvkT*`_e1V zq(A%4&Gg^?4}UNHH~;m2#C%?+9+BpQZtN~TV}i^E}{Wf=$Qv_V-C{M$i2^(kwClL5Olz3Om4cl$TAmQHDayiL8t z510c%1;`i?MFw=d(00%W`CyNXPC7BhECS35PZ5dLaWVr0-(z&yO!&$g6AdnQE|iad z6W|Hp1fv3VBfXkE=&K_XxCK58VUb_aqluB5g$gU)0fa*{Ym*J;D0?;qKEWc&yt+=M zW{-~^0axmdy%g;puGl`j@^f@f?>SqPDKn8{4TGa|-$UFgCGR>yzzi`?aADRt|fex_wNzW5-eCo;AkxFz~wO> z)O-49)?`_jtst5%J|LxY-3a9KuT{8Vg}P($+|ffGvi+!&u6XHLNN$|EKa- z$;QbaFp*<+RQ7@mEu?99Wq~)cPJ{*p;rZ5r4x>vpH*}(8qEU~}YuJnkw-6QUBl;F%r?rza2-W z!L?MN%!Y3Q{wfL&^fmOqy+P9r%HQaBm2_1PO0W|UTteZ_liD&OQo-|#lSsIRMp9vv z-V&qfc_QmXLyf2!B@;wD|J0@g3%*V7@e(z%9+ynX20%=Zpr9#}($Xljf_4b7t?<5y zv58dPUSm8gBhqFcrU&o6mA2<+QSha7>Dial__Hsi{M1F7+luM_+iz3nn&FIajuVBw zve3AOVsqYVoiz+`Eb}lcs?fdiT+PSVY z*1DaZ?t`zkSJFp6`W|yA-%ZBX)sOi0SDGhz<)9Lc7ucpqYUPuGay_|Nqe;YNw z8_=PQGTA2D?ZEdeb$NB}8;5^D|2=?roo6i#472}wlx-Yk%osIvhc$i#Z$oj(LmJjD z>ixQjPI!cNiT%_lj`Z}VLpq?VI4AWz037%0@F-OTwwGlC1pLyYseoVq7(r0%JwQfcgZWIGNw7fBiK(X2#z4<%PbNtKAg z#MAp87GG+90{Fo4PeEtin{8o0t=L7LXffM79uFYat?@!D%SMuQjq2 
zkdkL^Mz63Op%+c+smuiyZ5RXVROj0pSa zx1|iqxpE!H6|=@>sS}_Oy&9!m+|RNSpTRGkcs>xuQiG`;b~3}%C{3pHXXG*FI#auw zyBKydC)_vYZ}XWdfEV&&WZIdN+=rgXrX^0V%Aqd~0W_3uQeT@-T{ym7@Ow9r>Kak+ zb63aGy}OTSJVMZs{T$AMB^@5`ZLOlTu|c*X@JD`X+@u?$R>Wy40%S|1VV$L2gm0~9 zbzhCht1B&4Q;!*eK3tzmV~kfjrgk`o(`pT`Wi&mETQhXRM|c+W6`)r6YbS%IlWm!R z!F8u3@D`?0Y^6(Ar!dZr@1$vrgD_YM4PO;H+^MJMeI*=XPeAJ>(^G_|jt@Jep(dJf zcm~JORWbY*u1>}b@_+G9zmuk>uB5?%L7ZoJ5V{y;Rh+f@uE$yKA1$t?28~Z=@Rh*X zXSE<>90z|4KIz99puIW0|JFO{!AG~#fA(MfwE)qMPTLgpLU+q&)awP-WcptKD_3pf zIP4MC9w9P@TNqfz=%S^A+Bbt$(Ll@JB5jU585!o+)K8GAYW*(@mT#Sp8c|}dYxdAg z>mHGOohWA|TdHxyAxPi-(~pTp(jbV;p->$a#a;t)1*lx#dTBwwa>AxU3S@(zia`z| z?r|C&wl07W_b~u)N;B^XvhN`hr#M*wXyXaF0H3$iD|B3dHEcQ8f=Qb{eHia0FxJkC zZNi>$Z*_^;2bwZTZEzj23GanA?I@~FW}Vj;=Ans!FqA**k!A%l(c;De5n(e1yV&0r zu4SFM7(_iHj|=RjpN@t5(d@%;ANUQP_wP?;w@ce6U(!|^E!Z_W?8r2u&CS{5U>-hq z2z_-(YkccNR@P@zH###zjQcMWKrD{w1q2|eVFb8f zEL_2NtNb>NXxcA}_iDt}NUX+022fe4sEv|DV;D--NTC2%V5J8ggZNp;KDDu~+m3O) z2kS&5mB|M1Y^<&6@v!0mz0EoplsRjYAQDqSZhAy288(3Hxpd#)@0LgiV4MG0zCl!% zb!r&d2|y#D6(byz9AdYvrGwZz+eG&`#p}XYFzedF0)oFyW6}`ylr%@6h=Q?scGI&) zAbb{&NbDt5D2VSRI%tENk);9~rr}=$9>TrA!H)krv9Lc+OqB zmX;Y$v`b{i$YT}*VLi_Hg>i`&fVW0jLLZT6qZhYc`_uIJ@k54uccybMy#%P^{CN~= zKSv7I;N}d|651GQ-e4b>=N`ep+0=uQ)G$S(9+m}hCJnkn$3D?VUfUtU+JSMD*6Y&` z)A_04ba`qpb=OFnEc`T`EZyNmvb$;S@$Iy=y2woAi|oh6)Cxeo`o*739p}!a9E}^E z?zBk?XdgNoRnxfdavuxmGdz2gPe5;vZ#C*v=ig6%>Qm|7k6vfYP$gY>=H=)XuCfmu z0A=m7PLe>k14AA_iT%h?Z`g&AZm{2{8dpH_03*asFg7*Pz85DaIJQ$miQp2=9wv(2 z$KH*Q#?4a?cx_@L-TdC4uus`EaP?CtN`V|}424OJxf09XswZPS4!j#0)Osk_uAK3= z0J}{A2!1hIf)z*E!T1^jt`Qz8pD@MiXSiS}HaG)tL|F=oeVeF+(=9k6J#8*MB7zQ> z=UWHUq3nm$&}B($WvRy#h;*R%+xhG%b#%ux6xy(4Lx^CtdAg#2i*=$4jG#?YI)dPK664P`)}Yqz=@>`AsJhi(!OYpkq`jj@LiKVUD{cMi9Q z(Kx?bU1}Py3_V({=~A!46MjaG9c2-`-~BYoZU2MiMI{_0k56tP3syMVOf9>VIpHX@ z;XLZ3$6+_FZEL2S0s}|c@2)UI?qvD;lOa6L9!Zf73FV?CG7763{#GU%-c-qREaa3+9L$Q zvJ5jvN0d!H{OAs~gWLx`J4DZ%*n|u?4RFJnJUW+}EDQyQK>(3-w|H8`H@ev>Qe{TLDoUlyf*n&8Sr`Wd#D#Xaa^ZeH=PafnMs} z{jD;A3IZHFkG5qHfGODc?jOCDo`3O0{;nJ#VbI0{8+1agBUZ=XTi!^!MA|X{8fW45 zvlo2;mtl1DBuz<9D!F&}UV8T@@1(E)`k!aA%_hgr@6bF<`H}jawc9XlP{{pvUa$vs z*hG1R4Liay7-iqIpJ78;d+;RtqfLpZC9j#W^vi~*0`?B6iwRAe+ssvm=0-X_=~FNk zHlO=-px1Ri44z0f4u4F4{KJn}2OtNB{*?Rc#OicJ9}rhHQ)9!dv)4l>AxHLV)XhE` zv1PB3>X_Nwf$Y~PiSQWiJagM^@|1RgTa4mLOMx7+Kq$KTQ#x>cK`W7kCWcWbYQxok zI1r$u=I3z2NnLS0-lra6|4@cR18~C7ty!GV#==72a-SiBRM4-TJ(9H|vdy?bijx9b7?>7>_fXLvx6nd1MgGPfZlXF)H%qPANL;0na*64n1$|c zVlqoiJ?&8}p)U6A(%R=xr?tcX!Gq`mom|WPbdD@*=wT|B0R!=xx=_AVww<72#=RB( zb7W!m^zzH!HhOI|Mnw#117vwGJ_Yf3Vt@M0#e7(qxq;!A5VUhd)T$wY+&vk`o%Y{c zPvvMWu!h0fSrNHNUgC@9-9U|m3eS&Owu1W(o!gf%h?4NEk+UGQQitODo4ggQ_lorVP1bdbxn?^-iwzRAp;FCBw4)OU25Js z0lysxlWDUP1m6-2n+gU;2Dm<(2Ay5Aqkl}zSr=rgvWTLyCePvKMw1)>-hpRhxratm zZ`zt{Q|A3dL1&5Z?r>0pWd|y5m9E<<&FtJ$E*Xi9L8CAo>6Y&)!jGVVA4%7O?~c_)F_eO?;T95L~IAnUtsN| z3~_kc81-{fWWFiU`!-TV4TPtSb&b*ZVrrtnvq|$j%H`>coVLVCVs%rb7|M3m*-fNt z{^Og}vCXF0n?GVK*E9k{bcMZm=I6egE`0H4Q>ECKic{wT>@G6hVG{$rN-0MR-K{;; zy?HWQD-j1Mfsrj?AWhRjO8{9vB@+U*0zhSLaW0+5P+emk71Fz_j~-IGu$8{_3$Nl> z)QNad56m9tXpAVr7Z*o{QwiFXpzkEcaExoWVW3+vf?aGt3t+cFboyv%$xut$nVCsj z_a3B;TOXyJhxgOU`yZtF_ik}2&BOHZ4}Z+L={P)y)dA0ciVWnrNA>_Yz?c_FqqOq3 z0{qpQ~e8f3|p2`(Gc4{3kr61rZzPmXbcTmh3wjSHW*qKP zQ@umf9QtOVpI|rUmWLs;eo{S2?Vk@LJ&;!;tPy$UK3-bzp`mm?&3O5m>(`L@Idz@& zp{D$G(&DI2)@#7Oov9_A9L3uW$UGy;lP8@!3^1~m)Y5|h)EFTfPt4+hF9S?^LSq(V zIX%6BLk!Rb*p!zkb72IbQ$l{~(gkHx88o0L{$K^6m(>RblcL5P0Po*jXXqekW~5V) zES?EL=Vi+~syv4_J2K7OI~rqoZytQo0!{3L+FN|U7)^8z4yg^FZLC+a33e?3Igtj^ z%JL0wENDYVaVU_6D<2VuPtTYbw+TGXQ}|)x>3MI)^+FRhs^%lMY&I6-D^-Be~iHOVVqh43Ln1tMtb$e*VA}U 
zHtj!NO7Fb&ewukO&9sHxX!L8Lj_k#M=P#rKx>L(2iy20}wH(=bz5tzC(*_xVE{yij z$XGZK(y&V1-QJ2d$T(Sm!WInT(e^4S$-XosxB~Rf&(0B5J|82S9)J8=+HIr!hDgC* z`1-G;rJufsGy8FB-2PD-hR+V?Y5b%xf1h%jDo)B8dGiVpz9Tx?bu8LQ2RmZRGG&JCj0m#$uCFF6L8J+0u(mDpE-RW}CfEdcis^43Rr#WsSUKP9@v31KZn z@iNEIi~3?af0aGEc=?6$nW5B5WV(pMVIr;WT-Ji)06b1m4}GP_eN!ncEs%j6jv=zDN%3Lqx;?1v-w7mTKx1*1*~6&=TU+HopmTBeY<%_ zE~xyX=u$_>AdY3|D0EE#ZS^WmF=Qbk;$iwdj0QB}S^UZ|pVf$^K?A?czS%r!WH)Ad zLVFK$*~jU&T;mLRbXKp1!azY)okF8sJLzE~oF|~LDuO&bFu?S`_LvZ`y@l)m>YRmM z{|ga*m8{H3mvsp&a8Lrprt;7 zuv2JLh6deaYIccA>wMIZZ`0J44n+Th*CM!6q(;jUi#S4?`#NRFy7kLXQ`d}>_4ohY z@29^>DZ-^eMiUZIUtOZ_poddL8KF2mx0H&{o}-L|)9to)qpYj9gZgSd_uj2L>Erj_ zOW*pPe~-2Aazy79(hyGC) zj@|H?AXM0FOD?>SP2_Huvee3Z4Mtrhpfj^L&uf%iVDl^RRU7ND5!I&X23aaaQ(G#J zc?e4ADApEHLuAGVZ%=%c4~=+d-5);0P>ksLQP7N7B0A^-0&F}7-dL)kC*n)&;+H8l zdB|w2KYe7@5nqBfY?=JTgZ}@3mwS;)K?sTFifP~-1V8pUuH)atB9;+4#ee{l*^%fu z;96z^_YJ(EgPhvrGwLkl*+ZMod#I(}*x~w?HK0RK9wCc4p2;klG!z#_KN0jZVk?ky ztzs6x-8#z`i}lh-`}%OIcn#%nquN*xwqMC8uvSXO!jzjqJ;ca&cP2YT!7yQ%vM z2l@2 zYvE;ai;fHHapIG}wLr$ga#MN1K!vmXbFAIWzPhN&@0Pw;1z>+jvyUhc2-^U5)YfR% zAclvtxK44Nyd!?<0$=3O5e7K+a8yfTHZn|y=(sF2z`-ygK>d)6!9v^%y%%TNMjeO3 zjUrdT<3-CcW)n>N)lCDcY<-_z83OO9H&z@3WFL7@04(>hW>4d$=ch3+4Wf}M zq09<&|5_6!u(Dp)<1*VZ)RsZy^ys+vA0MV2F`^a^;tC#pJvvHZ5+f@Lbu&6FgOTO~lzhRA0N_PV(up)MAf)Tk1ybhF0>zZW5IX9}h>yonwvG?SXcieafIS8(N@*atT5oHLO=gedF<}1jpO+UG)$tS5B8WD9|XGl4a5CHAw7A?gOS>C4^u;3oi< zjx=`ZX-)v!BXT;LN`T9Ij2@bunQ35kBA}ny+8Lj4fPDsAqr^952Py3 z@#z-^(!2lq57Ydek1-_uls1&o7}4A{qT2uLfBQXvlB17^3If)?^2=XmCS-s5M}PP4 zr2WQ0>b-o42AL`|7XkiPCIFGt@&V5Eb{z{-Ypk+A-AtSSfS}|H@N0iN{6*xm&dFO< zqIB|xb#FcAQBZTM0MtraUtCOArzXRDeDC-FC6Q`} z>C=RULN<*&IWk4(^lC&1R(k z_3!-8>6^dxml+Gyn~pI)g1kIyCr_WwQ7b-k_gtP^J_7X%nWs5>bG>{veP$L*Na%RvKA$#zz zVAsrrsd=MChIu?yaAjed4=bmR%AvYv>Tu+3F#!fhj;ZIY6+6 zftAE-MlcX=e)o&(%FO)DRKF2VY-{|jLi3Cu#Hfo7pQY!o#v!S5RV zNWU7o;09roF_gV)Nz^KoP}2Bn}Qk1TsJucNZJCKBn6zZq;e1@wDejVHI| zqC6GO>IrgZWG%`N_^b_APSJ2Sxxm2JEbYpfb|lJgP(XSZxzibG2NVkO%=8#6kk^g) zcpsb6G_FTP$h(Z%0?yiy=PXg)nOT}*k;Qeu;L;||U;;>J*&)%RhZ-qMDku*wq8wCSQCZO@TcZf*|gK2Ce9Z z$LUwU^(*N%`f`}eU=O_l+D%Ka!VzNM0}#RMmPp_p0z8_IULHQ##eQhxw1u9`ENG&;ZT?P34zyby;+eB|>Ho3ydgi0oc?W*sGwT>%zPB zz2XscP#|M}*H%ZJ8P|60=pn_8&Zz*rS)6}$RybaQIzIzX1{0tI#WOGy*XK3t2?~k7 z5lN?E*+^+CnJvqBecu=7{F2oX=297W(S>y&}+z& z830sW>IElSmKQx*${u>QhoG1|Yz2U2I(soY15+cs{9rTq zKNvE~3Ah<6ZZeOI>F-_yMwEBd#BpRU53aF^6GCvkOjVLohnZju2og+#(5rUr#uIbF zp21Wq7@e(XZh^s~PLxdyg~WRWsZrB~p|dMRkgT_Dbgsz=Y_=8whPTehjh6}h(ld?l zzoB@(WCHK2Y?w+2qsfl?=x(f0oH#Us%Xv)|v<5jmkGLjpGD3>PyOBntEev-Kqe?H2 z$LGNWCuO?$sSUmG%FAh;G`G=!BDHOGhS_Tj{hX(?dCaBWOb+Yo2$!N!4*u@Fo!#33 zz{_!DuHB4|Igz1dG*1z+(QkKyCchzSC{dc*t84lwadx*GECc0HASFD@F;b8-_wKWH zg@${xW=XP3q@b;R2>e6(gMD{@B3-~|ZETa#{NhvmKPgWXg#vv>jgAWUPz743rnR}- z0AT9*C>a=g=CeF6Qze!j(#&y?#-18Ty~8L5_Dm!1sb+S1w7{5gfCn{Zp5illczOq3<*K|n!y{(c%BquGYq-%cVm z!_GR!*~)0x7Z*|ygEarf{q&E&{|9N4dAC>l2WglaO4mPiBlW!W+4SxkZ>F#Q#a~ao zlQf$Fc;T#wyg5Ek8a~`(`1do{(+*MO2H?~Pkxu3=LUeda#D&s>jK_^4W2K3qG)JB0 zk5dt631CA<$ z57@Ia?psFZl;BrA^*$mt(>LFSPl2nZ4|B-aC;A2^4w@PbZJsuSD&^ks#l4z5tBk_2 zEFqd);Q3Z(r}4YP$~}JB41?=4n$kuDy#?CxTPRDPBu6w(fG%JR^?>d(f(<(c*t90i zQY#%V4VTyP27&{1lMZAU1xigEr_@04W>|*~v~}~x0->M}6a;J<^^zx@iR#hBUdLy! 
z;X*JKvtzk8f8y`_KmRidw#WYIJll+d z$JNo3mH2a|dqk|5}IJJ}EXzL-;=9&TFMq308o@{5- z+3O!MlCMO>{@etOYBYlFq05LPr+#$O0pR#N2gDq|#i%2kxja)#v@bDCUS|N_<;L;1 z1gMYboW$3dt!@&%#W6w8VcnS+1P}52TcpR&7OCyyZ-NUa1)MT@#-_6x{MG>=ZjgNo zV8gu{%*QnfUN*YLz8tYnb@MZ@;|NZ|+ zfBV1uPZ<=zNT-f|n*2u7B%t>J(ZdT*J)e3psV6i)&aJJbAOFdB(=YzJUyXx)asZMe z0z-ZMBhlb%`d>#s>O8rH&uSx;pr8zYsqwYP^0*U_EJ-o?3<;1~WMG{hVVa6^ercCSSlu+VdONuBQjn_u&Dc1AJv|t%qV5?Gw7m>dI7BP|D~&b%cL$ zhJJ&HbG(X7c<+j@u{TZ%Xrr!{xHAqJO@K6{9uwh4g|Jw@{RjIH4!2lA8DalIUCcz%!A9GvtCdP)^h=(G+moBcM(=VUNuUIxAe>ktYlo$~zGd;b#WBW4&+SNjZkk zS^gtNIpy<_;b*Vy|7!2198@B}W76Aly$GP-s5jg%uglK^`ZEe-hD!qj(2@7cyfV3{ zT;L<1E4UCATj3%_n4@A^Ny@VU-c;h)AQc0Y5#ksFgyQeQK<)!}OyBEe9>WZl24nb1jL}Bxc4Y#Og%ymsUXMbyW+soa?bz5Tt3$U?Q$%1Uu3&yjLbM zB5TwvA~&oWK9iKfO{N^C|eDLx~trg*BPzs?v~Z`YvF zp;kR5z=VfRgrF1I;3K32DUU$GmMr|Yo()0Cc95P2?D-69E9=yNvAOOQLRe+rY6wmX z3@g2C%!o6(xrWiCkPzvC)=?22?%}xs2v4Z9glMt1e2~wez+p`FMs$Ayfbv9<$`}>2 z0tY?J67AsBwfTFg=c%XC#m~H))~FeKJUgGJo_;PB`iR!^*%*!1-$xXI@o4~y0qUBH zL=u-)i5NMyBnRW}tfa%`Tw1yHVLE>UU`XUGacDa|eK zq(!1CcR!v^UCjC1f&Y%#=XQ>8ZbgV1dfx&mjH_MZj=US09LHf`4&~M+hKO!vjL-q; z!V=`T&_A3yI5n)dE0^Z!CVq?oUPtq9Kb~dcz!shII2};>Km6_gm}7}EOl&x0VnUr6 zqyR~j%jnct`tXCB>HLc?q%DB)5yHMpnMMa0ht_=|U`khBdM+()ucRwCo(9+vJwef1 zrf?*PAR?#07Fn-lFwg$E7nwJ_l*X=I;lKe(8TfCQD1HHc>O^`RtDN)J#V8JsW2^|iBCMsJMDI?<6Zahe9`aCLeL zdOIQxM#&27SB8lB5mBHFQzD$de1;-BJ!)spG~KT}%Lxuj7Jvb#!!JB>$d$ath2VWv zhKj9@ED31xmWsezIq2M+d8^XX(BWv2H8^Hki(Yzj3Inx}yrNEvj&Tf*5sfFt*GRol zfEeB`cm~`n<7SRLrdc4^rj=l?RJJWB^JDhL8NOjb2wB&i%!15#6I0ePOB=p2)0F4> zjV##~0E<>qT_Ay6k2)d@75cz=m;xkAB79bzZzIQo3iVnCAn5^UI50&}1z z&@a#>n4Ej?xI}vaAY}6lc=$X3S3s!0Tj&K+6zreI8MMP@9I}2hAGGyx=*4gvYnn{S z002M$NklK-Qz$cbR$< z#unRk!1~Q(^iYRy`4cb=07+)iC^0(JW-Ff=bIsgyM#mvj@>B=5$(?GA ztBxgeu5)C7kT*XY;23%yv)}f_I06oKK`+rt4BtB`!@!R=PaN?Ky6)x&A2L$0mr@b| z7!(8PSufpmzPJ{A9g$Y<8A?Z)gcndI=z%V58F+*DBiv*I2#gUOEeqy`#wlT-#+D}! 
zfcgaBvKHSPJOiIc83OMT46%SFzxZ#zc-`{@c#$3r);Kvhm>k0iZJin1_v(gcLWe+T?bUdg5xGegkn_xumD0A!4k#mdNG=c&M z$%^SL8`agBXvL{kUx+D=xv%xR(m0}-@S4~|-B#rKEU*d3KG7Y%X6aI^YvxM_&8ZDK z*jIJX{LyMUUb#!ph!JTrMli-HdP7~W?(qnE9qQ&dNt|M8iTkl>TZ=1Apw6?J zJ%`PMR#bt;nj^Eunw?v3Q<`HOJpkqmy6U*)v6<4vCVm4rC8E50lzCaF3me9MiYFcK z)&floN;c3N+1&Lne%ok{{)Iwn!TK2j>bx^+*-RnS0W)rdb#Rd;;es|t8G(rOz081D zWV)`5fN5a#s+=vSm!em#uz9G0^Y%_ir#VN`G^QPNUOS8p*VSek>2=PmUmi_dMd6rk z)Yut`b#Ab=t2S+!Qt{N5I8&O3lNSWD3ar-=q*^#0o)HRNC>oI;7(~WbaQr=(LiGXR z{oQ93aL9Z{c_XsIyA^(=%EJ}d4fq0w0A;?;$gRWmWm1GFM;IZ8@y?<2WoT)dA%bN| z1HbYdfO|X-Jz^D;G}WLXh19arnaxjWR|UX1jX^rQu713;BS62v-)p#<1Ewz+)$O3Z z*Jm_3ZbOMo;dLF6wNJUl!pvMSWDLLO?>>*fwuF~d!{}?2xm?`GBZg%XBUm<{c=D5V zF&=W^7@Nh#+0-?5DP4K>D>NZgIH+MRUH_R^00+#u#4y%~G|0rJ$a;sUQ3H&c_8FzF zY;w4V8d88S%6s?m<23)^b~-;km1ghUN^eujurWQ2Q_0C{m#?Q+e)*Tv(3Q(+4H~HE z=4e!`vaasQbAY*Qnps#(D;yns!sxfk>M}KW-D&j3i%d)4#5ImpJZ2tX|1gb&?8mS* z#zYWo^2#4=r+wz{K7RX+boaeC(=ri>`CA_`F06yj^y}%%zx1{A(wBZVefE`?(!KY8 znqK^KUx?Ghc8LVeFE2!-v;!whh4#Un+oVNF$pX}h(0m9bZ@0Q&LKgFHZ~V#k)1^z- z0<^`*Joe`VLtrhsN^)gk0fl}77${Twy@GOO@(6sm@I{_F;*s+cq^X%WLP^CaQ#ej>=<4OoG|0gJmfh8Ku(A~Ps>a?&vjcppgv6Hs z3j5&P*i)kIHi(^JAi*xtEE5?WqPxU_czZn!F}iB@!<*=G98(m5$K!hPVslaga*4;T zJo+8Xs)FZk*54{vP8RUoc2j}*{xS3x4yEr`uc435kn5<64v2sA%V>_@>JU#-)X7y~9f2lPR(CjoK%;9qU!ZB(0!xnd zxCqOljBXhf(kMHRHllSH71wGO$fKBRMC~Te;cWG^QO2+X$i;ZUi#k!xleJ#oOqq}1 zExJizmvQ=XqAvK_@nX_qWQ>!pkbez*AI@(D8D3f5PIqrlr)}1dAy82D@z}Fq9HVIL zPh6PSEf;fL6uM$J0K}GO03uQ9yr)SQ5E##ZL8Kp|pJ;3uIdUXX0jKJK2_n}n4bmI2 z%uN85tg{OcZe!#Q*R1ooeS!(tJH;vA3 zQ@dtGJy!<)q`8@S?s0%af%B)% zU~gg5;8CO861ABs_om1Sd}*fC5;;q0tohxg@1P$*#^nGPtcCv!IQc&9g}iCDm5&Dh z=?Lhc%UJGzWH!8S=osV>9(6*^I=UuL&_$WEY^H%@xVhIzKls519P}_iBQxMc=5#NW zvDkOy6_97$yK+OWA)S&5JmN;hk@J*#vG-y7;>OK!T0u{)!5VzVSg1_t8rer4pb2`> zbMBS({c#O>O+Xk97eA3h19c(jUQ7NG>>8dA`Q-94iuJ3?3i&F>1ZK8y`h)Fpg4lm) zA`r6{TnES5#WsqUC?UM@+|y)mcge6iOO|_axyZUG+pIS-xiE;BNSdi@bad50?u&Bn zUNrj$xCcuTW1(r-Gc!2`16d5;(X@{f;ba>FfyV}eu;C{-dy1(60&$uCh*M4E;Z~eb zOU2?L*atJh>IqLs+rkMvBj{m2KtCWhx3iE=7T+fmP5%*mq9fmelCZH>ofLRY3<$lQ z9^><96D!oY8&s7~EWZl9#Pip+*IzV5suAea{@51u1GC% z5682_F`_(NpnpDWCd1C^W6}yiNjBbe@spH{(GZu%M|imzb>RkVj{k=sBNQ67xCd{t zM$l-qib8_&YbaYcQ)bL4_*@IJ@N8@@{}9}icQ=_SxD^IhA<@vavEJSb7g1`^6lHFs zO)iS}Av&~?rlyLDY=j%_$Qw%vtoQXCX%&*Spn6Ra@n*A~1-VZhU5q6`ISPpDY_zW% zjnT*&>Cr%xF<2g#IS9Di2c*tX@1;X4b9g_MlZ7#+MthiT>!_xvZDvzD0H#^IY9k_D zG)fp@-=>6OjnawEkqJr_o=?M+FLVr!#Ni5hR);9gexig$rcQXCyvFd5kv&<(lghtrp?($Y3`#B)17zUPE`!#jhA0ZQ!jlfWdTzxo7NtF zoR)9CnO3NE+g(~Bwc49L{rqRsg)7(6GoN`OUA%go$r_W1=acmGk;IX*|EcV~|gFYtyD`yHaTHvK%fNeY-L6NAq@$7xj;M$7&5|+@{2N=iyGj_at6hBJ7>sRGKZ3R_#KuF*g^736qxGw5i>#YnTIA<~t% zAR}Hu`^kQx%mr6ZB01_fWlFuHlO0h(0XSbiuXVykjoBq=M_nw@MJJj-lz3g)jix?m z91#ojhwF+CWB%q(@5K$m2@X($9_mGA)o%tISR(5r(u)3QNAKtm=olLWh{G-TyeaJ{ z6%#!3IirTkzU$Um%A=Fy9i!A?WTE3TFI+&p>S>X|?Yl%vb}-U)?qj*w03FdiWOC}z zxE%-CV|n+H_W=PXz^)&;Kcq%>hjN<{>TYv5qfVHyIkJ0-+1xmMt3+Ig_p%=71s$RU z&gh!-@pm#^(jLkqI@^*y zji`;q&C)yg0@@eh!*=*h9#bEi!BY+okgo&EAN+2?N#0{lF`+>Kzs)-gYM~BvgIUMp zSI(v3sj>8L{?&KV%b$B09dMQw=ayJ=fBMcJ{NL#pzW&v;wO>o`zxQGK>D%w8$;rty z%Bh<=7&cRz!81tm#7-Ch^4q%300DxJ;92Yj{N~8ZL9#qLD%y2zj^IRk$s6*w^|)qN z`6Zk^c}2c+GJqICK9_IgGwrLRL`(4E+`=M188OBE9kI2w58(a&`)mNEV`Tf9o(30! 
zZrsGJ3Aroc7z%bh#KsxzsehD{_4w&q{GEM4Y4UmW49^>UAaG`V0a6^~?mv_rzXi6e zK~Sfj3tR$VLa{V?2|0N3o>2TMf8`jOgwlksB2sKk0(`^9^Gsw>Xu_>A3*K@$Wl`** z{tSJL%@UG!alq75&)vY!RsX^}$j8!r6zD-gIKO&Lp4FGENw?WcC)H^*zQMATGlH0G7Nlc8b;9XmIo@3Z z{}6D4Mp33EI^~+Dlr`)_Gof13v61z;=D4iO z75f-4lSe{dx*z;azH<7H;Fkpl-&A={Cp~!-a3`{nB}nD^1?&a|4G>y-W6iGo?J0}0 zgmjYT9Yl~V>nacwH6W^k?pQ+)EYbPOc0BjRZ?|EDkMU#;=j+UP3Y1=9gyTikDo#V| zGE5k2ctC=}^ePD%+7!4AXx<)KFcOLFhDGe2LuqqR02v<{XTR7#ZkpD8*wo`V+n(u{x#@Z$S#JeSToIb{r`zqY**3c+H1JE7N>=Tc8UgYq$6T_x(s zSnK7hPo>SxWsEC^5_${)q8db3*NEc%>)-!JbT%KRspnrv<1c(Bb)CPMmUfTQKFtaF z!O_$^GKoTXWqIM5MPSmpi?R%*^9AfUSHPxG29IVktNdU@hkiQ9hb^fZ9 z{9#TxExViqfn;!6l&~T2>S_<^eazaO_w;qZfR~deg0>NmIJK(((vlGojZ3a zJ$mn_%wpb4qc41skw_HgE3fQ*H2R>MoWkK?Z^R$R31UF-5*jJASjhM6(CyS$nne0&?@k8@R|OP>_3 z_Yl806Oo%CivU*WV8q`DkMkhQDn8aI@Yew_KG98S&i?WWI)$bp8d4A%96%y`0NV;{ z+ zta1XCa?_5?xXK*#JZ5Z`HPJ^nD(&2Bm*~|IWiO8T+oc?4hsaPXHT89D!#+oovL!Tk zb^=}~BO(G=N0zKxK4rZc`2ZP~F+f8W*dz$ggSF##40G5DZZFs|GAFg*OQ*RElU>R3 zoTE&P7(_R>U>|npAEjrXI>&V>X(8f^>V}6;0UR`-VK=t0d&m6V;MtGheY@G^A5Vl- zN4s~9Ez@W_($VHrWytq=HUF+HvM0nbYJ+3_01Gjhb~Bf@X!13?u*UJjIq2#LzKR|k zc5QxXo2I^PPM%xC5n}SnCJ{8&VW7hr>|LT%-}~;LMI-44?|q!!eCy5hGr#zAG4FSm zX5=^&0{e31$^{yC$YjAI)=*o@qf=sJRo-|q``3q$lfIy*9q`)LU7TsB3tYN2KXuzN4y*Ty8M0g_4ld%kb@^=$PSKyrcg!M2Lmbkd(FTaB^tWphUshN8E$!N0i? zz}-?bovl+HGNSg}2zpV%2}{x<&h5ca(4;wK%KGkRJ@FZ^TegOPKTJr^uDjDyJMONYg2z)Mi4sCvMc@zOaxzw zTP!~!#DP;}P!ng@(o5)MP{9)`Gn9x*H>2sL9%gA%auhUBncM8lvs*jv9u0TY)4{zr zX|JFmo|bcUN}VilJJYCoaGdPf2|gxZqwQ#?JWsq~HY|dsY?phk?iK91T5XGgB<+%w z1?uSrII-WuT%7F0`W-u1;crgv(vei3`FZhNJ32*-BzU(mGDdfD6@z$YO_V`xWh*oO ziO{3CA*P_vRD}^a31^7hu@W+WgXRIMS%(u9P6Z_Y6(Nb&0!!n z#lOW^7lgVUVW@H+qcf)Tc~)pBu*D1JLAW(WyU92?08N$X?HMW9h<3sf-e0B?JbJ{a z>2rVPsE6@hDjxx{8*II9M6v{_GCW_+Mgc*D`-{irQ@I8AfN2Hh@Q7MVLBezKsw!uG_bB51`K8o%?PA(tBvTb3b7PG@91txns5rTy zV>&!#O^+%%i%~>v)T(GS1zbj)OpCh?g}@n?rfhfCIiZF%dP<5L?0u!Th3j$dxSLNC zRzD49dwfR4XQLR~4Wn><-^Ut_n(b_CMY{Tcx<^4vKY(*%ZY3=*%%)91XXp4O>XffA zMIlebWS$0#arRq>z=;D*W!Q#+b2jqeD075Kk1fpHOX{s6lEI7V<$wDZ(k@;#6ysRbRchp@Q9?k5CnwU! zAKk+9Mi6jV+H%yVx@QP=YmRx!I492JoPPWO8DKQo!-whDzW(p0g|*p;nANDiU7!YO zdb$>eP|PjOrKyV-m_@lnDFt<0@LLYqId>jV#=ebSoFu)snl3&4H0evcQ`V5-`ZBwv zF?pK84yYN<(nzz8bK&VY?Km9=D438sl)&);CxM|LiWt%(qE%)BJnGn}jT1!dUJ%Gx#W!i#5sA3!h@lTFDfsJCNEiL@Wt52YmB<6`t9~B{K&)hypqlB7|rC z?4$qYOW8KE;YnCtOZo{aEgKN{xANXBGO6%8l|XujzT-32{pV1WrmqFFMx@A6w zBMOUdike9rGWDNPIbLl>?ci?}eq@JOn+=N^TpfQiJ=Vw?wNo#sFN_4o=pud>z`7=z zXpIaB^lZj*=B*ka)^CzamDX-*p3PH3SXEJHYc1gIp9!aqI%HW;bk9iH3$VY*c~ z@UgMh`yvOH5%m)kajaX3eXj!~oO?RNffFY>O@K(91vZli4>nDvpl-8gxIe85Tm|}o z939yzI%YOrp8ULdQE{B~Aj3&j9Kx92t!r>H;7N#+-t~&hc;UC(a)| zI@V5RiJaM-+Q%rY0V2~LZqdeZzd7`hpg1CzMq0@Tl8S_&ayzAX(4~r9Ewj-DoYtzZ zgB7?%6M*XIs7okY0cY~XW*`qdT=P9%6L~TM%fg$l`FhaBKgJ!p>xDm753N+6bupNUgAn+%XK<^%d)HrdhV2xL=Va;6JU_Q+9spf23mFa zJtWJ|6bYKh$46kscII0Pdd&o%STkkaGE+{n-~8+U#kZI4-%bN4nF1enhFWJ4Q?q)|2h?W>zV@l|ZTG(D92^p@JqaeBi2J-xOm8%CdRADlW zkA}@|bG=bwH3$@n(de+oNiPiwMgs}AP(a;qiKfAdQUwUiUxFF^ypdhl4+W)x2lzwW z7~jLjIXcBAmo^mbDQWa7HKJz#CevLCgs zFo{VL2(tj!l(Im{u5%4u)ah^?m5&~?uj#oTkX}4Ohy<$|FQ4Cgxo&A7(6@fqYdZhX zS%y4SntAvbdccf)>2(>M*UHL}KC6y|BejeS&dn{L?7Gs`S6*eZKq;+0c#v{{V2_iw zc}+vNzqJwf^Q09O^vL=1Y5LyXG&(lIR_-Ir%jszGK7**YI7s2`w08d%LOh&Cp8rC+ z{1t#GTd~M|T2NVLo2k0BmX4O^Qa6VkbZ085 zH^<2_)Av(tWf>Wx8IZ`y@Ba7yU3&F5|4Ld0Fl9+^?6L=k0LV7x6Zc_Q>L_Cm<}j+U z2gvG#@8J>}{g@>ReVmKf;5tUn9OE;^b1f||65*XB!n+$$tR6~j}Rz-TAZFs=bw8qYVYLD8oc1qk#}FEuwdljbwnhT7BUDFNn;Ze zY4zbFfQN?5MlmAA8epzzEu6J(G0ztu)qrnL*|QV)y@6b{q1;2EVeq0}7YE5}7st-? 
zcjQu8SLAtqtVxuSXo*hHZ)Qh&r5tTQ9CWD!<+@N>r3lzTFcG9E?at zjT-kBsOu0JQFK4Nulmiy9R#!jPoGKM;CoEBYyYjYtm;H)j0D)MEwVu9U>k*vz}Qb= zjkRe`$C6E68iU8tbF3jxDNmKCVv(_1U5pm%hQG!Kx;Uy7u+6_07wGr~03IT{r_iyc z5yzO0OpP^9A|p~?J^)B?-#&r`Y%DZ}ZU9nGh16lO#u=tf#lill{cj^mOP+$LDowL| zTT*5u-A3sGygoKD!u9HDiW=K?_MjI~K15KZ4+k%c{m5aDmLJ|v*Dp^}JG#c&?1{jk z13}c`jdf~?!r=KIcWOWKlh`KoR)1kWiNU$&Swgzmgg}_ z=8RKIg=~TzM{gTh)5hvwM&OHpkekQR#ye7 z$_TP|ky8_A==1Q1`UU}oAO7HGYUvt*N92Q09DyYY1Scvu^U8gO#1&o;^ax!FvI)cz%!%qY)0`ISjH1Dr+`A3c$RkAQ^tpK}AtE zui^ULT%I+`CSp-aCD#x7qVvdBcIAmgqXXj*7yzewPy3rzn+0_H{VWV*D3O z7MT@;sz5{d3CxL}k;?Wxl~|9#KJ}&nQW-;FD@YFA$vXx4I`rb1aOSyJpXEK!Fz*r5 z@N+wjsoOXm9XQ6>!K=@Hd;Z=-7Q~+0iR*({Yw`)v!Y~Dc#WJFR*jkgQ5#&$))qiux zCjYUyK7f%)qeX(WQx>bSu(r|DluWT%OXRV50l1>0+PVy)vbo^^3&{4h7AI#G`f>=C_U}${}C`E)k3x zZ-WgCQsw!rgEB>- zv9XRW$ELc8Mu@E^lvz}C(p2H>0$4dVMuCb#ZoG95l!zIkpF}UbS47Y?To^?sMi{AL z6@VdV&ikhC+~+-}NO)G6l=V7d%J+C(JxNoYInqbH)SKo1>T$^i{`%h3#YusQ?qJ79Jvjfx24*5g?e!C89l z^IuF~{!8DW;b|z{eDfWyJ;t~;A|_8?;e=8~2^|88FjUs2H2|@e#Ke3~4BMR#Zl+H? z_dE(|pRrg77SF1OSVo!4_dcf}apv(H9w(jWQxiCYPRuxBI6PpPy&FJ*Elf|Rr>PF;E@ca0fR(e`-x7Ru~#e8v+40icjNfrGwOBM8Nqf!BhdQnLfT(gWHj1R+L>R( zLtajgKEBIdA2IH2F)cotrJ-&%tr8tyB^~zQmgNJKfm~(QF?`=kH?v1;w!x!gBSR?Q z3iHsRBMPw-=cSB1H)!NK#+ceoo#EbSz>qlC@Dlt*Kftp%GM*4v zBa&SMu(eR`kf&Zd!+TU{&L=jiK^y>iLGknXGw$DYek$dVsbgr7flpv)sI<0H1=--UELb7MW&+3oE(VZ|{uOYrPpCt00!xOYD{qt?DCwbCd2m6g*<* zC*PSn;pVh?v5O!IfIxzxM2e(L%SFhROSVk2kF`HQPep(qzA zSu_EWAV3fVu>iP;i?EB0H>bIibLgJV-TwWa-pg5BOz7_Sea|^h{6ERmF`rw+SS9K6 z8us+r%b=OcPVEMjhl2q|DYEJE^Apg<3ZI{zpGgZCj}F4%aAH8k2y|u&R0Y}HTa@LUlY+v5Q07`8ne88kx_sM#!Ex|_uToCL|bbmd}%E_=vz#A_6h<}?Oa zC~&`+Ac^c0Ug5k(BDaYF4!q#c;=y-$jjG-6tqMOSvt>0$`k%BY7C;~8qi+s;j6(Y=HXkz=Cg zILR0}L-fIo;z$d$LBvk^n|pYK!E64nbXviP#zsItP>h2@AI0$+Bez&2D+e81yL>Lu zcqJ{Os4p%orT2dDcF5KjKl3qUUX%RkQaU$19nPd{@_FSAh=PF^<|^vAf!?EM#>iC` z7&28$omoM46-4v-FV8-la?X3m-^-WIX@di1=$uN^(!0I*jgm@3@E*ok5wuf4#e(M( zypyhXsu^-YHWJ&Gdv;}NVJ@VG7E(rXr2h4(NJ^bEpFnWcnysa z{7&Aqe}j&49E1*wPQQGGpyQrgQ^w^iAUNa;*Y-JlowbI+OA+okIxR!}PL_RE7Aor^ zx0&|_SM%KRQW4F*44KjC#qQj{9K>qj{`!YLX8^R)jGZv125%g6# zQ{XxRZsDlI$426fESWx~bLG_CA~Y(^XiGR)%aj>24R|@WJpc!O3@3rV>m)la*fbSW zX^jB!0Newo1{rRb7KAZp%6IOk{2O@kXcI4&KXlkk&^BqSVHGz7t+Cf*j(vwvC6B~= z`XPQmO=yk>#xrTPQ6>@@I8~0EoEBDpo6W>N!>F7H&$TcC18Hz{A$Y>xS*qa;zcqQ# z0z_%Gs8&3-JC+e@kOM0SLDS#0mm7n*AWZI|;O^joG%+;wutk(ChS5m2C6l2@g@1>Z zR|;}CHWY9MMwD6mgA`OaY!t7VZ&K6vW>Z9IB+@7{fuQ<`i*$~_;y}k{&bWDDPEZI+aIb*em!1UQcCa z@7-}wVbSb7>&|2NvP=wwLYl%u+}WVW1_!wgGAIgW9%_Yxfa@>cfY_)&UfHM6gkG!& zZcaSb?^;!yEr?ksbr|o>{i@)N4KQ?|TVzDz6ljQv`3!gnO5=QNZ({#l!=9%gqfY3N zP!w`?(7OuZ_{|3o06!o!{{8SzzXAah5}Z4iKK5&$OIH?W(!c(f|4Z5>C#!x8Qu_g^ zAQ;$n4C-u#Vno-#^AuP|hU-;9kKKc@v7t0WE2J_A-}`soVQeC(|MEo);*|b}fAxPt zYeo7|?xqKXi)JU!QnWRmcIi)ucENR z)nO4Ucki>m8c@&#ugSC-&MeYwppkF?{_i3j$!kZLAgguufnjofdAF3+!FXrV?M$HhLBxjaTGyeEpzMME&z`s;Y(NmLN$ z40R*KU3i4xct-g|C)}pz3N@fFRKBl^z<6W!Kt4iAA^(UpaSu@je__uwQl(!Dj@0ND zEjhfR3lYh$UfULADrvb;1JcpMDj9n_o6i_g7Z}EHw|T$TDyThl5KNm=xf|lvVOK%P z$&pLV&!S@k$X1rd8TOys&|rrNUgke4_f)RYR*3J)s2HdL-)k)7xM0XKm&)(aD;!!; z_-p9I0EKz=W#s|#<^idz-R}pKViC!DFG{oxjiyGOEus~bFF_2Uz4W8oI4q>ns zh-~zr6Vau*Xe8IDvnNNKV{aR5t%E>jFJ1&G97-EF zX>(I!>Eiswv`#qKHf0qYTAiwMIA|8ZY4F=x@dO8Q5&D_FawXkdU87YOErwXbMGIY_ z!3HD~g;qotj~n)k79C?B$X%V9V?YfBbhF6kDHQB6g#08@4WdAsWJ|ZAe~0>#~Af2ePS18W@99>A^H>KUR6{Y zLGD?L$~mXNv3@|D!QpHcdgF zFFtoUtvz5}MDQ-o45yj%^J#Cpo4)zlPtyL%S}I`tHn+DiV9$fbk$(y8j*|K_JIkJe zV482wal-omG|22R))8k)U4$KcJU^oYj@K5@GzFqfsCSgKurng%tfi@O53TszMur>_ zKgx4EZ7(DLB11$$tH@O&9wlhUmUN@gjAV~!WlxMMSn;ntU|)VCYC}wlETnmAB;LMx zhZSRtbNwv6(g)esI%la#=h5x(*>%wc{XTHL~<7Z_cd=O)E 
z;YSo_@KXav88|i0$iCuDrqJ==v}N<}W8|Ti(`)cfR zz7sQ}q~_616|q9mMk>$+=)e9)zm~r9|Ne(`mvk|WR0lVD(g7Dx5^2F?2M{!wOw7#F)M4^co+FH%~B zMJA|X6d{4B6hv{dfVjG4@@$Zvayx^eN?EnH;<0D#y=0#o7&x>N>1$v zLdryU4ehfSQU@4^$K-;kfE<-)1+4~%hJzmc2tua@nrVO6BZQ{Pz;(3Q&pJw#CF6L3 zL_mawa#ge^7-t!fxw9{P6oERO*6u$_TlCT$ySx|$S`miE2r{qNyuboy(L<^!7=130 zdecJ25me?z?%cT>A;M=r`YHrgPKAk?)Ec7012OCg@_^UwKSYVnXlX+PcxXu8OaVs( zuAavDY9C?fGBIsB11gKL(K}(X)18$xc;jmNJD>ZFv_S6gm5H23s zi{}ZNL#wl89Ig@t4o5p1X$gm|O3rQvnlbF`-0~92;Q+&W2~_j)RXRtoFC(B!dpKY> zuD(oq!!{j6#NRkKjJ_EhfEMSyeP=C|fF!%1iSK^*2kEW1enKAd9@E!0aAImO7If&D zv#+9*B1j&nh|2(@u)7Gm8pd&s{IxDhL;0=0j))7?H3|_hvSMl(PC|sUKy5Vsoqi~~ zDG&N!h(OL9aYGl2&Hzof5%%V{H~E|Wvcvhq(97fKRJx+n@E^~iFtme(v?=nDsgS;e z2fhVDKMDfmxK2|?#xYV9u=Wfc=6w~WSZQGCaAp;@2oUolQPmj!M;MPo>D()?q}8{7 zMv76DA~}>Tof`61?Qo$2Xj_&J4X1>%S#~(2!(3y8(5@}Vx}+5tCeC1pi4GJDVM3_R zLpMUCK_93q=Vt_?4WBw*Th!`^R!VFns0p0QcgI(<$tXYeO!SodaR_nWs23o5U{mxs zU!7wdD~)0CW7|p9^2Fl)5>yheD8h>2oKLxFB+G{EaVkJr^P(wH?G z(@+s;pxD9*#Zciqjjs86ptY=4;kOl8050_|0&U}27gtAG&)!>Yk zy?f*sR+vCIjl;S__*_(#6G5qvo`pg+glJ*rk;P~%cw%atGyys~>^)&&guXEVw|7s{ z{fE0L14@)b>6EA`5T)DW{NRv4|6c#vYxKgNNSEj5F^x$LYFk* zi2+h6HgNR!F?KFsfM~=UG0Q`{gFL&)_>`ppu@LCI0RwGk@!9vpAC&!VdF^|ES zckkfq<2C9RrpME-{Ng9m58nGAZIYvF)Xg*~3n|C>_u{KB1e*Ql-}wPNMtU9B8sxJD z9(0_wRFUOVoRbC9gV1q>p*baJ#-i#`A`vZ&&Qpwg$EbTAJwnyCgVUiOL_SIbh2@fe(!(#-SnIP-G2#hlJh@-P9nwd-ktjt z%u<8k{!l|R$lAM(^o56w7OIP?M>wsde3?@@${G6JL!5#VPOCZn$9sFJxp)SCqJ0)1 z76bzSVI-mVg7%;p^IvP!TGY9whHe`}Hd3UBD4CyZRI?C1mkX^wWqX#JPpvAs0X9-WrdMf_~afnbAx2nQANsb8dOL;#tI>{ULsL7W@t9icqysIp9AGoQFl zN|3hDIJw`oyKjbEj#hM}nuu7p7}edz7BVfh02tTAHc?MFkj7rsbsWGzjFq1lu~1~l z-oDMkV?kqf*D64RPzOK?C^s@x{+K{c2;OMVw0Oud(m!|@z!wZ?=&wxejq*+TV~VLg zj!S?~cJSHTp_3G;cEjkN6KtU#`!+U1E;xp^5wC$7hn3Zv!74YblZ7y?XcLi$>OK^g z7iRaNy}lQSh07-L8KpWzSJ#_B@R*-fg=wOA1mziu3t}{^%&S`7#2N*L`t+*iEAIZez6*191$IowHtMM0pmTzEOD-*7o<_` z<#|B(PL85_WpX>&Hwzl{#G?2JWg~+I3WVa4crpm>A@h9=$FlDp z(iY9gMWYHLt!(|lI>l@}YXQaB8I*u6KC-xIG6Z$pn@3V{Tc}p{bqIvuX9(;z``SPW zeaNex``r~MroE2CL4h~ZDRzN5c*RLvlzxOc#fXC@4x;=`oy&xY2)4= zNGQEGL7^YryHEFln%SXTE|ID0xihvA3dt;^c@hT-IVU zxP<3XQRo=hl8Fb8DqT?%J?wD}QeJa1(kZl=G5n~4;#v*pH@tCB8Ycq61e`xNm&IrpUs}SEBT@k!4Mb&z%*m2CqY|_o72(iC^w9>jw$RP+ z)d`Bnp(!yK0^_Ta|D`;0JnpM$g(^hqr7>q>cKsXvo{2WC#{IM*n+Lbs8p|&LD`wYR1^`zkky$OBpO$Pmj^-4 zOBNL3JVkCa`e6OuMjD@=pg^pW7OyUVsxr(0*-^lNj6okF+vFx5J)s)|MsyF*qkL{Y z(6;IkIqFdK0I5GG6sZoe-!(>iZ*6YVF#~4}M0f(F)uCV4DEAs5FZ2XiI)Z#2U05LB zQ8^7gV~F>29+8s(1tsKz15wN+Z4xc4qo0n6qnAF4IX(Pf4M(R1 z3VjASNyxL8Pe1A1N~=VS1~A|(#BFY%bf<9Ukb5_l&Xc-%&iCGvok-s+KzC&dhqp-yvDM)^D4*#bZQlFb^(ExeYMNV|$LP(aEuva8 zOE|H;0wd#RF?NsA7yjVSQxRStNA`7LX1kPpuRX$UrlOilrH4fjlV9 z+lzgK)>Cy5D~-G%43z=+UfHD67OnT77YzsXnY9UyZML4ql)zOYZ|=`5X#7jBJez*_ z#s+!>=TDt|=3MI1{RhGgnHq&YfF0^aog;3+S##L&y0Qx12qni0$Rutk(|-7V_)Fth z=0s+<;bmVFH7=Cq0HKgaJg?o8Fu8e+P6B1jJ^0XjX$RPrt`6E7_wInC0@yu%|6y93 zDl-UZB-Mx%j+49lU~QYCPLO?`2iAuZc^;liun=rNqG9|wbmh$ASt5z!?0Yx8@WRgr zlACA0{>G~xNq_U_e=*&30NSDZRv$i$)Pb?ta;gr{_K&n3(Pr;2I59B9xFlq! 
z=Vna=q9f#wT^o!h^-xH*AM zvN75ap*XmYlbC$3p+da^m4|s#=6H74JYjkp%jft|6QjALkq)z&qpBxm!nYOH(Y}Lc zuxFmi-TV(9!|%{YgdYrXN-%nk{l@8FX&`6Z2;qsRg&=Nl=z2>kH9agTT(n1p`)a1e1eEUVXa zf)g{1Q1ox^ceFwPXF!<0qv&e<@=K`>aqQq_s~87ioc0M6$=(|h&NCLd<-YK_yaox+ zW`RkI%6z|OKq4x%#%l2S62&XefAZsmM#~gYJf;|nWd3nbi+u{F4(Q#=zAGH_(Ad?5 zxwOG~?J?4JG}}z)hix@NyPDVkEFBWAe&y4jf(GsJIi0RObA{-?vs4)%4N4^7A4FDqJoEq8}Eq`Bbo_)3}u1yqIbtk}d%L0LJ16Tt41Y7c=w2$Q>{wty%F&aDLEDjf!$(At9Yi9^TLh(1`NZH2UYlDwn|4qR zC+sN~hW=O|wCY{Ww{4N@7I~y7o_v8~Fbpd5g-1d3?D1=HgGOLa#msd~IdPq;Wt}>s z7Yb-oJ0Nislfs>Kso)9o_7H%;AEDF+a0o>#IvDOl@LEy%!?klw994KW51kLOMhAZ6 zEn4HB@&zw|4MZTr$M%y=rxqk(pyAy1;3u61xSjj*?5thGIr9HC5_m@CQm=u5%etdF znS<^({pb97h6WD523kjhlpU`l%X-kfPLe4uwiyhEoi%bP-of8d$SwJf^~ShO{*EL< zUPwY16!59tIgA46;Mm7CTHPTR?K7-#-_2i;Na0 z#9l&K4fC%-H-!1cOZdFBT%4JKS1Cf{K7;U0867Zy!SS(|UnW{|oNAy%`KehVIm4tf zoS`t){h*B-^gcxNuMMiY59&NbBxhz}9$F%ocN_-;6s%1Md2x9O`AJ(f_$W)hI@`zC z({`F1hX+Z`D@+)oXE+Cl^wtLQFpB|#L$|kMv;k#%I+(65-Jo~&r2q~NH}>c-P-bkZ zgDmVp%;vX=ZqP|#FT2O|Mbb8I?Sho5gh82+Ia5R*PLc8I62q_A!cIhjEcUHKn~xuE z^PXb*?$_T}yd=-w)ZBh+D9t#o4xT z4-|+M7(-6~4^L6ws`)-Qhh^ach#@Aqv zlr@lKWzS%ZFu7+7e-(&OHRy31eml-;ZhbXnwsBV8tEP3pH49p^=+p`_>W|)kJ=Q&p z6WJimF#`<=XoT%XWJ@$!z1(5XPWK)F?BN8!g0HQ-iw!=2VGEiXwuHumddk`z zCX-@>kCnAS81k@8zw4zb>PDzDAVR4F!68N1ev~d4(%u|}fx5v@Zrur4d}^T{`)O*R z@^T1%GaaRagEs-Wufm#?#mIg3Iz!9PQ-GKb zIyKkER$!mA^y2NZ$CW4B$T{RQ&X7fF(t{|n=@No**7*!l$WtdOm?izMRYA`4ELkIg zEn27|Tgud2^mLENyz3U!V>Z9e%8Vq6}!bqJaE|OEFyl^ zgAf+)=gjsMm~EOwZ)HV~^qei=a9=%@%1z_O6Vuve<9u(HieTNTKYfr=}u| zb_9xJCA48~&%z0G8V8OY7NIF}*kwY#b~&)fP9t-s#37?hzVoFog#l8SJ&U2RkeVo( z26;24p1`ks9wCAh5eaTXNE!@X1cZ6g19;3mzK@V|l!2S~K`87SiDL+|bBPE6V=t+I z)bqx=MTp|LA)tFO%bD{_gheuG%yPS5}9-%Bm>#=0=%`Uh`C!|qW&`(OOG zznvbE8+i62Ied2?rJc<^(BN|V^xygnd2-Wf?~u?bNKuxxoM7aB>)YQ=f8%riDJ|Fb z(=WaF(X`5b5$sC;{crtO>3{y0|9jd+!A=k!?GV;DW`7^tdM~Zsz8#anTIAs#ldGjM zpciN^rpz*;M?FCwO*Z5d;XgzemUm!4Ky&=B&OSsQIW%cCxBUi12d#KLdxmEEOnX~- z=WS>cgAY2W^)Q$hpH1DF3kZE^hxCqs8bQ*@8vx4eSHtF^Bawy{C`yiN_YT(>mAuJ& zxqCQzoCyj5QOtRB9*fVX)*M|S?41a+0};i0vG%wov>phDbk0Kuzd(e1w+l-T?^(px zLJ?<^iwtKhzGM$ZXqC6Le2zd2ZFE4R8qlPcrl+_p_pM`qW3drJ&Yw}J0#aNj3F+lVLrW8-%cC^2|(N- z>)|(DA%TPdZ$fc4g7DQ3i#y00SLiga#t#o#Y zlrkL)QEhWNaY~J>Ltse{bLY5~>ngKVP0kwQc?I72a22fA=yxgpQ5JjAf#xt_}5 z5~-N-nK|68wDD|_JJlyQ5t^WHLs`|7=ikt$Op`f#|ri=6JXI|H>898>VnFl%v)wi2ppeb(gM1OgTgs=ww+7WcpZ^_vnC$ z%!{Os;P6dCGvE92SJHp<8^4y8&n?9olsgV~=nxe#hkS~bpZm~nU2v1vdeEA6AOdw3 z;M*_%h)9m;`80X9$EM362aJfB`te6!{8nlemmoCkALNiD=#^?t+SQqozTE@6J+PH= zZm|dKsb?qnr!2526yojE2iG^%ZIpX!$Pe*-U>K!dz?cX_mBp)6pe&p#F2JVC;80)~ zvD#TiEuvtA2gnlkR?nan`3ygB5DI$M3(+HVg~%0HEZ$tg;0`v@Mcd(^PCr%Lh#+Rf z0`KLT5QkOMqAEh}(xQS`;&JK}bQGH;Lu#BEIx{qATN#-`Hjg|yKB%%#4k#G;35MYM ze7BvCwtN@mhI-;J5IcgFH^2;pv+z8IF@C!wZ8z$8ZoJ+vL}M*{iOBjnx5sd=H__M? zB~~%&ec1Rt3gEcUAZy>hdp})z<_0Mr9s;O^Q+5bB3E`V2kcUZ4DagX`$KDG^a9HUl? zppYl*C3u$-4EP_fo%Lt{iDWep9K)=`{>ycQg1um!s-gllgvHaP{q;4-gC2P2X41^n zbD_{h7tP@#8=k=o6n!96F3>e$=Ae<4AATcR^b}yE7eD!PY4ZHl zG_f?Fs^*r?K!>b*s76~N_QI|fZ~gR5di%~mI;752x>4*hS3U>B|2cVlX9+1~BcwWn z;5N+SnBv`gx2ej`rPsgp_4MS<2c%6r=9w|*Ie$aeA_N*ufmjmOc1&v?WW#A>Vybp0 z=&?UmFuT_x8#-QE6g3QKg_r$nqb(l7P(fUw7|*cB! 
zBm}K)u|G&cjsWFi9*l5U79WTx}f~VLC%~AeAcO$dksXFiK%d9CObe;S`9hhJgW{LYD9k zYeKpE_iUE#CMfa(1wR^Whor*vF#cG09|1$@IUqzs&Q1qikPdboCCf8_HlM;%FMjr~ zrFY(Z3quE`0>U6_>a_!*QJV2D#1OQoqwDmYFyc|h3j646f`I})!{7Ob@r|$uS#^XH zH>4Z`3Ce@43~+5pQ{K$7k)oAsjQ_{ARS5l%S$ScW>4DeN%07zAEyAd4Qp{z#&KNXV zWRHqG$Ar0NYCk?JavCj;_<&*bjmPUTJSXHE>$MWwC`$(eBV-O|2)2$-$ODBu%M zXlT{=oGEStgvTA(+@|xuAWr<`S&Bm8sTR(-#!8OQX;4%!PBnmrK>2j;4A0Zemm@_| z1BlF$$J@7Pjc!<|0-CxFKXFe7d1y?V_T~C&JM1}fV2^R*c^t?HnS$=-8Ci{_rACG6 zYUIj}(86H=+7RFwo}57!*piWy3XyourGo4;XVUG}26<&!sj7@F?MQntg-d7Sr5hDQ!P|kVZJ~+R6$%L8x8*19@1} za6<8|1(jtYiRP`=Njq`x8y)iXX*;EU&%-oT97M@$83v`mycdDfkw{ z>PBJ;6@ak|8y4<=KQB>-nkqJ$rr?g5n#ZY?W zxrc_hOAkW~M7@K^x+$`=5YM3++uIvNGlwzwsLQEBxfp!2 z?768y$6R9wFsN@3g3z))Q(T(Rwao%7w%VmHZyr6cwaxG%oPCSf56Q)!nxRMZCaHKl zyAk$^a*chDp2?)0&Mwml31@PW=x^8@6l0E^k3Me1xnB@F&H8Z*OnYv{6*CO!3+XG89coMz{=}2_CVWB$JqYPF=-e254j7iYC{>-u5ey|@`dlF!wfYqIUEj% z*qmJXcf;WzZ5=b^FJq0pCQ%JLI-Mdf=uf~|a&|;+tXHaX{ULO##nx4T)Ex9$t74QB z@kz7+9q=ISzes{8Av6Rfn}iHuF>re@Ac`2LgB%!S63A?Y_uH4llJa{m#Jg%E~WP1~rF z^X3@GK(t}p(NT=n=5V_8-S4NZ)m4hnUQ7jy!($ZDF{oD#V{#lJ<{lGa#H07X`QrRV zo5mc9K}e{!{v`d8(;VJP2VU z2P$4TBy7MgbUr*yxO9P5MbL{C>=xfOp_dX$ZUCb>a@x4{7~Z}vQk*%=Y`9v)hlmPi z42lBHcQss~Cn!IS5{4}rVrr7ZXE;v%hoBh*l`V}gr_8Vd4WVsB1gC0%P+ZnMK_Lwz zL>$=AKH7wCxewPA&Fg?rYOsj9nP27KEjWPm_?zqE2bez|NBpE6!@_HDV@)+yVhKlqh_Jh9!I-)0?r5`s}hOOZN&ct&Ofz*T6!O=)|=Do;9 zafUP~LRD>!T@OpSP+$qo+EVwUi=p9>U~+gOZh z%P&ZsWw|bjW>DfquGiRQ&xm|fEp#(a4`q&oi!=pgh5tmGLiF%lkp{qGbdnX2N(6|S|znd$3M5l-ogKYej``L@L;xCla1uZK!7YLu_c^xMgeTPP*1)U z{b&DGw)NFMjkPWEK*o7I0WQSOvNx_v8dEs=<#*52Uww)5cHMoU>>J!;et#-geO)I; zhm0>HX$6;rY*D~j@1QJq_gfrD%^YckQAy+`Un5{ud@&48ZxkleUJQV zl&o?}g=kvQ2Dy|u4A%Yall0a%ei$fEo{lY5^n%mTM7DbH+aZy#4k%F#WO5j}n?v5@ zKz((zPI1moaoBmlGzjWgrEja~x2R^+cOWD2ebXrjr4E)wyXu({WYZuHv+W-(+7dp? z3=?j~*v(_a!lY@_AqlLhfFg#xiTKg#aAlj6hS^krj&zu0v~<9;Rqs7c z`x?r4p>5LR1aWkZibPya*H>}uu@6YEFl7gB;+ddPXr^{-p*wwAG4?v79**OT*oRth zBFo-t2iSI0w&7_a(sa5`XCBaCd$vvxaoH!|TLj;sOEW|QosxF2x1E-8M9wTNAxg+0 z$LO>@pTLzU`uWQj<1-?`Mt-K|r_&};NsWIeLJnhW9agqO7U~FP*s&g^P4PM1O-#LjmrMr{&liC;MXJHQl^=p2);w9nYsVPR!44l9@&cC9ktsEz z@*2Z!Xe5Jin^BkB6UG=$;Cq` zv&az8DsUcNw8QXh8xMjV;d5LNfg9ES5P*4V_V*0|O=u=i0S?+U3G*&QZbI;~F%pOfvRSemy@v~&lW}EhDKGj3-$S*8mKB6KCBTTp~}QH zij^=iF!u16fZf^{yEzOD_=}JA5fYJPm52GcA_|7!LSgd%dN-nCQC-g84ME2HFn3+Q zVc}gAs)HyRBnY{+j$X8Sev~9|-#@4@;X3BfdS5yA5Tw?8R2x~FB4HRoqEUfO5$abV zs|@{adljpZ(GBS*-}p=NR+?$@rJqatDB2^QmkR{i+C^BIw>N-?E~4laSnDC zgVMntMTHkiyGEYwAZ@$KO!Uc&>*0gE5lF^TKp zPkx+U`@$c9IBcdfSFfhe{g3|>MQ2wc&$I#(*W@0<^Z?yQu=cbhc?PR7MNcfm*a6W9 z0}q3BoR(5;iY%7NeY^eE+v)MUx6&Ve{`2WC{_uwS)H%2uAy*6kwkT^p0@xf+PWysF@i+Ub-1EWH2ZBU z7K;=_%1Y!$8wwHy_xKFKK%nGCA(Y7R%GIE;7D^lAvq01G&emi8wom8|B!KjjHpOHE zRK<32HsEr+bMojk)G?<$mm*Pv=oL%|syn-ca??@8;1l75CfKx~Yxz`TbP%D^HW%H3 zQNom1?AY0O$o$|#F=*bmxWbmd@ME27nw1@erHT&Xi{k8wYiqpHvJ^l$NBXuh2c-v z8-HXX)WSRkoM+ObhtMzixJ3~{j1DKz*^AXe&r$eQxn!3Tzf`6gXr{*u;~d$aS%=;T zjWlCx93UD(c{=u)K*;i(UEhG`0vamvCA8+%!XZ(nLRay-hOd8r`t|g^>`TaJ?&Shh zu2IC}uChM!H3yJU8uYeK)XCu;d_4mG)gbFFU?j9a z`c5dCK@g)1ahiAbNxd3M=dYY4?Pw~^pWz>QwR40`J@-*$x#>@{^9yO(TwJQAol4cy zkY>+pMP-qiZP}KV7a(Dgt_@_}7U}Ic@CV!Mbv9kZ@Vc-F|00Wr8E~P)w8j7f*H;7q zHWF8+bAyA5<`!pRgQ2v!v)%6iLG&YoBD9@diCoE3WS))xP5aWB5xI35-5x;1`lE-S z&4l22&E4+rrz#Ia3&TpI-a$2{jU8Gf=Xvq7sZwF&BD=%Dg($V2njGqi0q1*Hye%Rfe2 zm6eBv!U?ZWao-wn2QzvG^%kgc3P{Ai}XV;%57|s0EO}(JQt(HHAY1-p&az zh8_A9Rjra=Y~i(FldtLU369wN^#EShn3MDNx2TzsA3oIk0!pVu0j8!WL+16Tm6AF~ z>Juym&u8dZb_oAdn}EKyrJ>yd2(*Ll?uS8ZzP`~eBMG)|b2yJ}-|Rln;JcsI&e0fG z4;gK78mC=T!f|ECmCbelap03iZ=`#2y{PSwz1aiT$@bvv*jrxkhm-~OPRTL=T~=@! 
[GIT binary patch data (base85-encoded PNG image bytes) omitted — not human-readable; the figure files themselves are listed in the diff header.]
zigXpCp+142i_q8~l7p+!RK+O}UQ4c1A-6;2YM%dqdkpc;Nw-B(7t8^zEH8v3fSn(2 zJk!G(v-gF(_ ztLKXyjkUEYG;^H1&t#wA0B@|^R&y5X78nQ576H~BZ3~#1FQ)lSD{VsWAH4o!oCX-_ z%X75L18e|fGW@TBk`OX0A6L89&zQpRsXgSj8VCqv7(8C zAh?8wwoBeQd#AB7?nz6L8ODg6W;CkQ(ca%q6sIT0q`fT$FA0De^;$$0O^p#a7LkpH z)iq`sL{O#y&wb>X{A}u)G-9`&Av`LbDnq4Ny{&d=kZz|4{eQ00J_l(IBvQXMC?XTY zTGVA4)g{L0EcdXEg$|!VbIJr|y8LR&k{vaChD$mdSxBZw*7^WFx*c$qElbsr0z`|& zP3cT_A#g1Yqat#wLnR`r?1P1P7GoQ&^F2lk%}w@f^e{UFNU(33f$iUPGUx#}g-VaE zYneb!h^H)=EMf4O&gq&pyggU@dOJEfie}N}=5D*kI!?^-RjZgzpjTN(=D@T~_C<%I zk$xlK>Nebkc#5GX?k7EU+4Bo9K^eZWYl6jc7TQ61!2@=L7^rvHix}_BbBI4oVbLiz zNgdafsUz&6^1`&jC`>bDjXdWQ3?pT*4qN3-i*z(~NQ0ulGQ&EQ$rd|?7oT^8x5Yk# z3VW-dRTc^|I~71J{Hg%tW!@{WP>|r@3G3X=i)?rQ3>f3sSr{HqHmkacM}*CvKvbq$l0%$e0h#jzW&|-O{5J-Z^p(R&#Fh$H8tD(}d3W{rYi z6;R~Tky>DbeMg>3s0us58UzqlnVx;qOw7?Vf7iBWO$ZpN0da9xh|`dmz^92Incvt* z_dmUt9)0pbnz^<~I2w-_eE{o@Oh)NAbw_(f1D7>ED(lWpDbQkh18V2c7DjJ3$2 z;`${$LY&MHK&rPuPld&WdFIeZ+ZO0P{?7VcyP`JumY<`KD;uSiZqG%#g_Nh~wGq?t z?o)E@Fc8>-{iAJoi<~KpOW!-mb7WiDbhwMaV83U1{xjClM>uord|q!oh6%+5mzSsH zx*I0tdAMgj&$Cu}o)R7yjHePtRvsnYyFBDth-vfaIg=s#+l?YAF&25bJcmp7X+qAh zHv%B{$Tih=chZ^)98kNyY4tobcvA=fvsMBU(+~KaVd|-t-5qF02#P`-n)phaexT*WMd&nrx!J38J?yYx@*0{9n6Svy5}LlS9q9nT|tZU=%Hl(}n& zGv*m{Ogp+twg^gmr*p;s>Ym;he%T{ge^iszXD z3_FpsMA$<^FGn7`U|l1167OykS&<=ys9aTkYoOs;VQ<-1fF^~|T!$aSK$I>*$Hw`> z@0_b%c;8XMfUsA!XJ~G#T^r8Gzt<5~B?_m}(MAqylolcHBA&e%x`WQ_;{DXysny%C zc7*F%?5H82JU?MrlXQ9B$CHW-)%hZbvw&C-HicZcz?tJ`axl562X-P*<{RCWHuTKO zR!$-e2N60^L$U&BGBTnp7n}=sq+jn-#-g_ii#%ZKxUu1kp{sKP#*I!G?5PnAd%kxt z$fa}j`5_Tnuj@Kpdl4r~kG;IG3j+jHW&kaHV!ytj*)Y7sXaGqm;}A1?b)tS&%bA>1XHI75RC>$>sI7U zm8h3dmZ%G1BTOGG69wtENWD;hBPVP}S^;O6)+7FdEQ8mLtY{zszp8GXz#nmi+azJx!-qZY55V%7!Ic@GT)>y6^PxrQ>wXtPr=mD}!f zhx7D|XYe*V2~R`6foIx2)Lx$_V>Jq3ggk2qU{&V8bVu_!lDZLXG627r%cGaakiAuf zUO{)Yg-=7@=RM`Z6%3A`mcD{j_`ecU$U>kf6uGto7%X%# z+~j7sx9*|EqAGu_Y$qeRPv*X1uCw+e0A zR-nfr8?b~Ry5OPgr&yiwycG+0cH$gSrUDM1i}%{8MMI=}b%GMn`%r|I*U~vrj~r(q z&36$(>?G^tIy`>{u&Jj`0XPEGjK~+fJf$@uT}?JNm($MOyC{7MGdW|GVV1R4$v-wE z)(70`iF91FlLOCNHFqi$Nt=BkkV;y_SP9s&$#!BnXvZl>vPO7J`K)rK#I-VXPOB4i&aDEDY(_8eoun?XZ8N#??y+~~U z&HnawgZP|^T6v+ui+-NSVQE=qB`<00$vlw? 
z!gpnnVRaR^d9RarX9UC@fIt+kC`WjYiZ5t}&$wQ5{p1Oixa;_Q2nF1i?mpU1zxs5W z6dCvsV6PF_$C=Ot4B!9oLHhGQ`4)VOOs8#@qpa)bkOnCX3yl3O0a!PS>*yZ4KX8^( z7yBZgYV4@s>v$L8KAMx~lx!ic&gzj`j+VvQCb*$a0GI+j#W`{oO^SC{U%!_=qIh(P$f6o-VX2hf_`#0=Iz$4=)m|mqm_x>7c=mjm zbc>@S_JuRWXzoL!rb`%o@l1xuOMjmv@>4_?T;g3TNI&eUy3pYr_J`F#7u?w$#6bdr zIZ30-^?WwN=0?|=46bkhl-@ye`t8)kri~xQ}xSZ$jGz8s|WyL0g=vV zp0L0-iZ_uzC8Cz5Bu4Lc=;w+~4Dy!OlFnOzT9y(~4n3~|G^dkEO!7>e4}aIV5(@~F z6zuAK`Q7OA5E>TXTKpcvCb*w`<_KS-F6O;9sT&Z8O1sK2y@aejctJx>&-Mj{dpaJZ zaa)qLO$TGNlXU1AIyO9K3Wxt;AUi-oPp456^Z8BBFzq1n@Sz(4o?zGKmj|4BcFsC< zigsWVS3VZU}st}`=*ZogNV90yqx077$ zK|PzJet`X!59`prbUaDKNLk|GA5*|)c^*42Ob{i~<7(SY^PElBvEzr4Nx{B#OdX38 zdA&_Eg>B~xtiT!ckv+5Ul`ZqEOXyL5AOzQdZDA1k{PMd|5wU^>lFm-|*w&P`WYf#; z7hI!d9WM&`#sdx@qOju?CcH=G z>sJ6;H(TL0et|_B@`u6wo!3n;pFrqaH8{lkIlnraUi`}Q;c8yPEBa`AAIt9q<#opN zv3z>)=IhbVb7_8&F~T$H?Yrk`_rZPIc+IDcg@yF-Pk+uqaSyIn0YnWrh!DaYjGjhA zTRsN=3&vI$s%*4KBvgq_LsUiG&gsckO*ASshUS=-@wx$5+Ei#lBd3xNx zxV4ccu;vf3Tn#T|0C`y$W`a}=8DkYC>U#PpF~^f%;BBnq{hTB{VFRz&+%pq&VfaCM z?a%%h1VvaD54NGr_4O^hyjOT0N9oDO_tVMK)3ki&74o?lec8;U<9!PFV3rGkJFRF2 z0-96AB2$i1cTJAbf!y1OOG$<_(uW zd08qA$T{AxrO$g|HVH<6GSJ(~PjLsgd_id zp581-((^jg`!cICtFp58y}GKaSM&;ErAUAPF%n5p9EC_N_KYk`F^(`DVTWTb9G(a} z!VWv;#y7TO?i}{TgdMhJ&S=In9?e)3N1`Z@04Wdvu{F?*?nW=wweL%2<-YiNP7*G# z+0|8<`Tzg-o#j2}yytwZse+qfpN;e_-6_i!c(Rs0_}SZO z`B$D#r$sV?91L;5AqVmxy&tpl0__qCH2{Daf-`nZCd}g$4KB=1)R-PH0eF_yaKN7D zFoYVw8rO0yhR9xxK#y1kOm&d`HqBoUY_x{REwWG9{BZp?4v&%|hL%ty47gl4LAoRG zl=(V_KB89ERs}XFtYCQMjrSol&UclUE01|;c{=WCZgw)dmPg34*>ovgLa^wGXXD8C z2w>4B5V_NWn;gK>YuR01#<2a~zyDn(9`HJ{8V(q@F`-xT1uOv6bVLkui>+_KOZBMF zn_qi5?MY<-@YJc|6aesLMt(% z(+$!5q7GlWw3I&j^cD`BFcNz>$@QD)nkP4f3?R>zCS$L$O_m`A@A9mDE}kNS50nYx zi@o4JMhWg4ILbP}%G2RG?5pKPuiuCB>EisNp%LA;#Ah&?Wjs_3rxaOZ?3sW+%6G_l zWH-F7f# z0a`p4D1c{VZ2|o}MgW;e24EMXM5}G&7r`CJAXptzPlC?CZ$2-_S~ezo!m|icu{{`S zZ9{OC`8zVZJY!F%9e0Iu;&py*FIcuzi48^`)N6lkLyq73`^c&q6ohjlz~tIeCagMW z3o=kQoK~^I;YHa6^bL?F9jbfs# z*Qp2`kqsTe9-CBX)__Y;Ec&o{Z9d!ao98btrjrQ*IrMG;*?joPr)hI-HO*bW7(UL- zreJ26N|L8@YLi(m==lVm*eUxc`~i~0$!8o)!Dz7(jb$fwvan>qG61ttX5x+jb`7-T z61R|Lt7!5$Jv!G6{expJbRYsG)ZFC(NSI|E@Em2YcHr0=<8ps>>3bBtI^hd9)LJ$ z$I)s!ma@9SwRX35Lrx4V1#+tdnsUwKa2P%}GR!8V2y|>z9^s@k16L~yPTOcxm9~k~ z@n{AS``c84*)uC{g2gg+G?GN>H|rF@p7rn)QL7tfq16m=m9!>8Fk}dsVO?V46n}dJ zv4LbXyz$-`oyGd^Zf!zY&QsK^dKygemSzejhcRvN-45%K$Bc!hq=z4+DJCsb(JEbFD?Skvn2D5H`jtz zF*7?24G0^BuQ~yod!n55!6IPXiAg!C_R36*&hvA)e^3@bGgxBZks~p}0a(Ukk3A5= z-UX8lQfca}8xKlik17iLrar6h2FFtL#41Jh!&CCC;vSQ@Q?5ox=#nDf70~x4^ z+8YyACZ>jIU_K;p0uiDN2$I3QDgO@9p9-#kgw^Ke1Ih9x4Y#}1u-1=*R;U~|ge67@ zF~pR|Md3QI0sdCG4XvL2;?rRyy+KEQUAT5BWgo)DBr-fRGnU@@>8ETi<)n=@6l#qz z7-fVJE((j$k8+$q{-0bT1Ip4J9qdwqBO(UeI2=ENQIe}-q-SJr`PqfBJZW?l92o9d zAR^Xdbr3r_C`7W}D@Mu54W|l}&xf$M5jHuw@O>EDK2F%_z~R02+N)`WlCY;b85yMGJX8hcNrsdK5Y=KKYFqn0(JeV%LpIC zssY-Y2ODYi{z^LY)H8r403ATLzR3`5O7p=TRDc)>RRKWY87Py<#hFN_7@sknUg-&d zlnn6Pg>>w`xo4~mivaLqZP*e$0Wm9r2-gpX)1(ArXR%6?CF7l85Bu5QJ{+DIo8Xmx z?pRnmGfGCD@_ST9OG9!<7NCH?%|t3Q)CMqk zHV+YLa^T1tfARhFM>lW6@uq2FYA($zEzn@JpAHEU9I$O^*)8H`m=AeKW1b^_=tu|n zj?YYnqV5vqwE?J3fv5uXEQ`8VLjcS%%6E%2f?PibZJ{=j*47!@VsecAY-30!m#?Ig z1|`tl6=H*-v~Uh;2<75F8`*GDrK63=cE@Z-_F{2vKFwb{7v=YvndvmcJxoz9pF;U2 zC|i#KP(3AUz_k&i@}GKQ_9R0_T~44ga?J9WIWn`}!umQRQ{$h97BtwVs=@qA-pLw+ z^V)PNgRHvn0zGqGV{L}MO2B4ArsI;NSTt}LCuGLAkqtY6+3+y@oqG`amu5IQ?JWC8 zb^;JEOBv~tzy3)yicCFq9eyv)ihCRYH1C&pxepvDpH~O?w$L#k5!XiE;kj0BF)$xl zNAdxP$O8MTxv#ND>l;r}f$}GPpI3jP6^o>@pGV1T7MLsca~Z#*zG zlFs134Qyoknt2++Zj6|D83mYiPT_8P{(CpglL@X|x{`MHI4oicIYwW0=sPf57o)e( zr3`>PI;MG6n@SAk1nU^a2%x2`uzg{$D_rvkx>6qj&Da_F&EAhZK7vv9u8fh_DVbe% 
z*0tq&XLkyQ<^HM1W|r(%z}|1M*c1Hv4Ei=-8oJ^{YuN`Df`8L3h{8bOa8M4{;h|huMVxyAHzjemVS%NhK>9Y*s$DDG|MlJ*cjyLlt z<6#4J0CSelaZdRzIQVhJ7>vGEaRT#fyFlhp3n4cL`3;}XjMEnCs^kO4zF0_fNI0(?>bprW3 z*KqD_*JlBIn=P|)8WRh!Pc8hU2Ul1-GW%PiN@Eg23WNJB2(4qE-Fqj42m-AVsAEU? z#t|wElK_tmvcEQv+E5o2V)is974UEM-#xN2;QBkhQr*?JPoH5r3$hv9OsR(4pFpcb zz-lJcib8*b#!dqCs7`=w#0h*Hz?uftwoP%Y7kVYoj7EK4BiB@D)n*?}AehDQJi+HFdSpMy=M*I8q+8LnQQN5T^%q0mYU%H z`5I+hJ+@-1PMh^s&T2GXo2hK@Gr_iNBOPyoS(hzbFZjn7V8D&u=Sg>sbUUk@H}EaDDpV+Gsa{k2FxxCk`Sb29{zBPhjh^ z#!@?a(AT%%E5pW;W5JF|*AUpo#15_(v|2!}_Q=T)(VWXYnvm3w_b3fMF#of9RR@sd zxv4N7yeL2`O4^I``MF{c2AxHuY(zQ?=i#D4Xa*Sa7@=5w{*F0-=dPY75}d|R!lxxd zI)pcMs3E|4oxgl7t=;+spNLV$k-YKxPtv7p=b7X%2k3!#PFeToKYlX+&M?uYGz3>q z5T=umknZRo$5)xj)xaBFc<)BF8fXtr&^h%3G$gfq!j=N8Y4ZrZJPYRW!43%bpnzy3 zVk6}17(KQz1Wn2mqhx|=aG<6bQIbJJvH-AL|$vQGwwQ<#K0 zuwF^T`f2;tolwqazx5i=Poo|{_Go7-J-B}dgE+|i#M!j=*%LUwU*JgNUUN-{>y{8g zxlv6rz?@H*s$?byy9ymk&a-)l#KX~0k_Ff(v_`FhTRXx9$TrDXa?p@gQM_r+ z6}N0#;uu;>eU>-;(n(pWk;tx5Rk1Y$&PFcaG#qJEBkLj;_Y`O3)DKDQumAb?)ABQ4 zB(r@$DUyj3Jd!6xISxm%y61E^F)EIgvQq2*ciFcJPDoE}rDAD$iSEoIb0L}0N=e#i zxrnUKQvR8qfY^@$7d5y#6BJrXK&e7zfB_EePKXZa@HB3NN}d(9j08=Pf50n;g%C>RI76n1UNS_f;)_s#=_Au z0*(y8%kw_El!(QGU7WV3GL;Fo%Iv=ZUXD}9j@Z*S_uXOtPPi7e3SQp_M-e%PTRnvQ z&p!JiCCkTTd)a56XaBRi7`f>*ef1`dAs#F30U$J6`MXD-0SwR?3|y5CXEZ8bTWex)Ugg3_|XJAfF4euC#RI^ zhRgtReSzyBf#$ing|xo%m=S~644(OLbB+Cga<#Eeut-LO0HD5uPI6y5wYBOt@6V+7 zUVkGsDQ}LG1(mu{V358OR&1ihzz|62DEUHu1ziS#74Hq5GWZj8`FAwga<6uscd$X? z`(ft7Rm~b`xL}JdF%Z&c<~$^&?FKl<1d`yq$TK5ySEQv7*qx_pT1Ft>b>L zK*wh(khNNvUrwKVw3S}@%G1c4TzN~+aQt!7{Y-5M@PW>mO;%iWX~XQ#AvRe@uYQ}A z^~hk?0Zk2i=CRQwrV?~*975(wQ~_*lkiyVn^jP~yVC%ZsFa=m=zj6RZ%iwnQcd0bU z`PF{*VXx#RHcookytVre(#Fb@G9QPo(Dj)8CLy=loNTTazw4#OmcI4qlxARlKY$IZvA3Scrav!XGtA`aTZYhi z=~6b3>thT+>d2*fp)A<=9NAR?L$_xYz zWDA*N|D+Llf5!lLfp-^OR89i;q5s-et8bZM6e(nzP4gL8M>*(LCk4Xlc@@Vj5U;Z@ z<0coh0|p%U2opf+Nn|1O0C0kX26j$5gCJDurKd7_JhD@sY!vhLKKB^^?$Mb5#N3Mq zdf3QjMY#m96F5r?)Heo)m$fx`G%+{GKGg9e24zSyuQeO))GM>Yy(6`qQW4`$*~=inOA?8l|9nxwKRHIs#(jg2c%0fj9~KnN!C z9+6?GDF%I2Dh>u`CI`eb;y|m+hQ~-WUpN6)o-r&KB?KzUXqYOMl&i)|upTt3ax)l) zyf+o^LwR`k76R2JMJ!*)tZ!c1D|+VZ($z~0Ylnw4HIi<>yAck!j=^Z)__8?njYp5t z;>|CSg)5}ZjR)y52L@~r0gjDv0Kmdxs_gLn#58m{fGXK9BT32aeK`Ck3SJ=l*+=PA zKB^gk3X5SFW#+*$U(DL&KIAkRk(mz9!AsetVPk;kkbEUVkHJ(~k*_|CqLGyrF3-ZX z`QAo}sb6^(ZZVnI#LPSHwks)12C7jzpn6phg}t zW}NkagT`KIM9kv$B3Cl<2p_(UdsnY0(eT-Ip3|rd(TYe_F6AdQ_g#7R%jwii6xYsi zuMILIcDWvsZJmQp>3Qn%h5WoS!Hfn}12zg3#_nIsS#s`8%eJrj{cNZhBgYoEUfsckDM*QGxJsjl-Cq%B4X&;o`k$>qLmUl*nX`+HM zAO|rHQQ`eL2=JH*_5=mSkZC8}S7#pDMO$Q=!4PXC@9jkHlunyKX(38tquB)C}oEhQE({>%*uG(Ex)mr(=RVx1j(Jy+*kW`^#qy_zt?tGd~;Me6jvQ_3^lh3u3*;l5+8{PT%7w@MEx?bRq zgZo<`9$~s%_I?0{v#1+5n;5(fNR)flYks!+IP1WF(y>hyNhSxwg%YK#7 zW&dqPL@*)n6)2YFLt^jDb~skZYzvD(37sDRfW`zi-1~CY_`!p%bo68=>;VQ4r^)&elNF zrLY$06*A=sdODWKV)9mGHq0D zssahtu>klK9N55Yf+fZk^1V|WCT$Sno*bNCV*fo2QJrdOGeRG*)c~Rv0BD|4avy>F z&p!BoY#l)e`!o%puo=mWqS-7nwSK-c;iUz5O~FtLYKjz|F=V2~-`ciQf54iJWVH4c#7F+R+~;^9eN)lz%&kgmLn&X z8~3IR2n>l5kh33xDKn_*g^7<6en?pp2nj#~_^>{qC)|&;0~7Mmlc+f zG2YdTNk0TH?56>O6=Zq1qpXos3$yu7V>#aC-iv11DZi6}j0y&8W50;+6HvNN$8yBc zRU3g*ALvy?FVCb$9C zm_0_{q%26KIta>^0mm=L!f{`MQ7H*xB-g|Mdw{HTj27j(f_RUV9|tcvcUbVavq>Td zpI{G`!GlyIg+Sd^_lnXixOdG)RD8Gx8ycJ4CXthTK`zG-1%; zTG|p5mD&}@FMgiHv>F)61opxp)IZ*PA0Yc^e;?UnpShO|@}UfxJh0bCdC~ucj}6Vz zB5@zV5n&OHqVpJ%8p5dow8=(pjpHTsRID2p!};=G6%aTGf6j7vmlK@i@0xD+;EA!B zaW2wF8IVEA0A=SIj=`(*NK-fS2%gH^7ha?(X$FDVOS4y>4g|h?|I@U+@)&Ljzz@L; zP{jS&F)xsICaJ<^G#W=ovVdxYg6kq=@ZcS|rieobS2P?Ulo9!$5Jseyi0}#kGC&gV zR?!`fZ}%k@g@^)U#RAvhxjpsD%K@l&Hn#vuxIzGiO?ll;f4cp{AEna$M@(J#a@t}@ 
z{m$b@sefxQ%`7gbp$oIDUn%XB4%2hr`i)e|o~F5>vt+>*Qu_+<#Ur6u*L$n)!;@Mh ztG&#D4{SEGc8@;2llqB9v&HG)IJNsmaQ-x#SPKS}AYnKjqR9%G%dH1YoPgRugwixa z!@YqT0ZZ(cUnot3ePhx2!(QUJjdFEbMw0_MqI()ED(sV|(&>QhN>v_open4u)hvdF z!Y{#|AE!v@(Hs(PBUm0Pgyejqr8)|sQ*fAdoEwfn_nZmYQ$9zoo(91~11#alyB6f- zKqGd5Pcn+;%^&`GdggEcU7o3)R%txRkohdbA@aK08)V{mot$C$N|EvL9_Tbbp`<1d z5uwxo5rgbyo{sAp2XS=Zkme|t9}=WI;rrD)AIAYB{ct5;`Nj)r={w(!BRn78xt)IJ z>o2A+&Yw-gW*lletSOJnXJI=7^p!DSj{R}|^yV%RIzd>GA@Mb`G;+zjCp%QlsLNYy z0+`!bo|zAd#uyB4lgdaHd9{&Yw7?Vsd_e<;ZMSs65~r~Z3hZZP04I5(U*{BiSOHrC zpke9+x@Ec0qnzskf8<>wz6(FgW-f)?l49+Eu79%WW7CY@BH)wfS4Wp}G?vNR6C_#c z^u3-GWyx_EKHlQgbSmAx9a9BbPy#%)smrxm@S-#bEgWH$=kFs(#Ciav3#owZ&`xv! z@isc>GzM^3Wy`ndUk1?FV7<%`=Gj+E?EL~}_9TlTT)1!@t~%KXjC%{JRWx>SUyfcn zA(J~ce}MoI+hIlm@Idqeu*69cp!P%kIs4qJG^erl2up?i8|F1q9xP`Vzhsh?b!E`TH+5lv zz!r-T#$5I5jbhT4vNq7V;#4~UEqjbThpxqI5F#D8j>XcRBha)%u9F!}M-`c!cOG?s zeVrI~$zVq6iZ;{?j%7-nr{D7-I7`4?l%jj3&tc@Gz)cMG zx%~*rH9{3tq$nTpc?P%g+qqMo&0qt)6?||Fvv_>RJpj-E2rYELGP=nFb+=b>Ly(bE z-HREprWfHgW;)G`$+b0bHjuUviYxGe{NZKVCaX)XPuKtEUCJ7*IIuexV+|7)d z4e~PtLE1Q^g#Wlho?##R(&qXZX6rf|wv3;$5pMI&leA13+Qgk;!DcoqGbT%_$gEI9 zI!PUhQ1AdL*-8!DM<$$Zp$xDRF_+!ByY(vj2QaL`WvmipI<=!g7DYXl^FNTMQ=I== z5uEX$0A!O&CH5);Ta;Ow2n>U7&gVn9kXkUwm`aahcMrB#XhK|GibdK(UIi{10e!Yd z)C#H`UFS8(lvC}JHe5Q2z%K^}x~y$t54?uJEocCO9M;S0xG(OTjltTPpf!Op z#z;~~m8f7J4DdWb%{}eM=b7D9zU8KyftEvS6;@DWRYovZKB+OWg32(7H#4RJSSyLu z2qNj(gkmOTK_D2Xqj7KUjo?VzBDN4{IsVZ}I0EHyDlwkmB}jF?a<6)W?H3et?_+@V zQ~C+aq8hZS6N6+Ql}UNDu_oBy3nB5935f)+5%N&zAe% z3rwsYgL&Lbz@b@S19NSNUk2d@DB>*XRyOjn=%N3?+2<<$EeF5CbNc)qDeb`~acv&L z^#&FI=AjgEEqys`)!9HBOa~ z7HNRt+MIG-X_bO-F@+ptt2ZVRhWXy~9Ct!=&nBDp5D7Qn1|sbM9a7gQZ`;ETxCbo} z_s*uUK`s))$BttESRjESe-j`hOmbwpIFpNMcYh}hR2j#?O&ubTJI4>?A?+|SZDis?jBx=y;-HJ^-mQD-vz4{9{FQH{?v;z_0%L{Fa@6Gd+aJWA<%1}~?g4-%Xq=lyz}Q2aqjUIFMzepWHWe1jhL(mM zU8FoYFSw0S)=}7b>-Z5*GHa3P4dqu6kp+#E!lguln0uzvn3v*Hp{sZ$#pCWvc}Lx51mb~`*bF)uW|UsC%002Vm2}@ zeLTC_fFAEjo*Bx3lZf$77_cEqwmseu{2`v(NcU)K7h{18Y6Os}8soTbJ>KSL?R53o zi|Kd1c_TglrRU(2Pa&gQ=>++|zsZDwVl#d4=ytm32}dXoZVbu~O&L2Uj0%GD${#Qr z>q%EC4zomdjjw zcw~~mnDJoij|gt((*#t9&9xQiEoBDHZ|fCcHCe*lXL zo0&cfMP!-LYS5ZEeaphMkQvJUYPU^SxFR{XtKx znhen->cIIJrfv+1ed3ip`v!0@Gb@L1XLTd=))W8SKdAz~r^cQM0@TYqvLP2<5{-Lf zHn8CSpo{9B*?tegz#_uh)lq)CjQ2Y(U(@h>2G*8c@5Gg z+;jU!OpqJ_&`tnv_JI_U*%G>Rf`e^Akx({^Bxt0HXd|uC^qQhD)yP?wy&k~^o78c< zn9a#j89La=7`I3k%&d8y2JQ!U*2Cuw0Hl_#oTIs!sv_&>NrdXEi=r*o{+~h-tP=Eh zX?o70bE5!mZIY$p3ZnrHh>9cz_IG#L@_s&pXQ>k?U_A093OuBwK{5+kaa1UpfZ`JG zb6?vOM}StF%uJll}@Kfj{o&tKxJs5C^y{sNh!l}ww|1^&_*)p;JDKoj}~$rY;sR=6gi7CPL(eUzfw zLKjY`ei#so--e-uN>eyubf52)uN~QU{H8WY>{2Aaa8iUe%Z#uGir9Q-AB1bJjuz1o z^#+YpQTW3HnmN!g8}(SeqXUA474g&l7O`fP}>BAS~?O;3w~ATKFYNk-4Pm2Ea|uqMm0zL zEjQ)(kSrYcT&*)q9z!xnXJSr5I|k8XblE8un)_iPYc%P+_?_QNoe>T<7@SVKt83|i z2qKGPm?>n_)`tLIK%u{%r0ZXKEmdfu+a-z^XDk>9GL1cdGhM#%Tzd15|8aWZTVG3% zMM=5S;R%{YABo@5ZkE2r^E`U*4&%nA(`Rds0_+dc^?v+t1t9>q5k=PF`DVDcxJOEL zEw2H0aEL$kgH;S6+4Ygx7;9xYgAE8!YNxj)8+pk(6T9=}uL?%pA09F?UtLSZ?mf}1 zq;%d9Wm-8~W>O;B!fBa>)0jEJM#18r@tM6r_?DS4I(%-I(u>B$BWTT7`4wd*Rui?(Q65R5Y;ijp}Ke%7tLSV8nUA8juG2p-})T1f?O1lD4UYoF4MG z>x1(wV%Wx@b*PUGt0^q6$DPhB5)d7c#l#SMN{~|<_-T(ICS!mcSf-N_q^z7U^u2qqMSz8~0+86I+72_XHvkk>vpmpCPP6J3fCcbhr(q*_ zs$5AYZo`buUe2hQ5X<>02#>;^{xSdueAe017VG7C@}B-z6vQFx28B@q1(-0P+>v@Q9t7 zk)4`lEFxeN8)=w>F(=7^y$4`~XX4(?GaqJICBW#*08AU8NC@6q%98))2UdIxfNk*czYKcs>}j2yU@pK4ua~kA8CBLGf;isi zM!R;}cTCg&zb9tZK)d`KJHw0C_SOj`4GOSv=ut-V=ROi3oIk&a>$hE8}7 zTG`p5tc~n<&`;-M_nNB7@SEx8Sx$LI!L*LL0@t?~$p~eMXXW$mefki)N+$N<#bUiZ2=veFv zfki)Z=orWmni35#Otc)+%xZPlisRHYr_P}d9thFL$#qUU$?={ss)U~6=fCZ<7{b4> 
zE!-sgC76>|D8NE4?VH%&2S2cWW+{;V-9XrD#=)d7^f5#-A%L6hLU39q`sei+!65ZDqb>~+xP+f!KYEid_)PTA%&yc8lQ;&#$I=k_d^9zZi3Sw{M}zG7sTuZ% zCuAR;1QLf*;HUIe^}2S@Y&C%d_h7JHMuw%|RIt@u?6*ytqchX-`E`6>7d!5keLbFS zVD_9MIz9`VjJ`XOW{_vJQL%$v@k`x{3_cjn2@iGfqSHN^$^<%lSl&sq%ZuUXj&`Wt z(%3x(-LZ{*69@3=LW)QTvFhA_mWMx{TWo`kSr|vp2pEypSQyJo4Hgbf!90TA*~A67k~BZ5&g)&nVFeM6O`pe z=3BQP;)HN$h1vAOx9>2<>pw_`Yh*&-f1Burk{Q<%bb4q3`^+X0#X9&N0k_n`nYv`i z06gKW$(|u(8rTxotpQkuiE^8i?<9=Zw}}`54W35TCW?t}erOmC7HaC|<&;0WOjepB zkF&!upnWhmM(KDuz4gccPi&Y5Xr3~Pr+WlB93k_7fGiD9!wVB>;l*dtCgsv0Qi0c( zmH|v;lXQGEzkKxOJJ52Op^JlcC8v1)?YG{he1s4eFj4^fCa0>kj&~4#j>aRq+u!Bn zEEK=7^DqovVeTRkAz+DZ?e}@PZ?*vi0}#2(e(~(_7waQsMvCb&DV|1gc+_!8OGEx-W{>&Uwmz$RdKZ#&Im{GNXKeEQ03&!-zVF2c9k0|*dsppX@)GH+HyrJ_wn-E4Oj<6R~S-#5F69+Xjyn=%-VpS5;91w#guW_$`{Y+cF>a#5qvB~L(Q$-mzWU(2pzGA*3C zROdMj*esVulMkz2C2(>Si_``U|1c!~8UUsVO(HVs7&h|yG}e)g#F$B0fI-Hz5q6B5 z>tp@<`Lp+U6WIbNVBicOoNsKyqK44DW5g{Oy!c@5Bj zxL>l*&psPX4$YZc=;G2T$xy|h4`pFKU*V(FM4*)fFrpvhYmT~=( zOJ{&4C~e_Pbsp}6AVK5YK|a+DdF3_1E;iN#!2kf-W*w^}mRdcUT?ZYF7i832)5rp$ zFD!Y06!#%OF!E!0SLy@bay@Jg-*~!}fX{2EMnvj55%*ec#*|J-NOf%g8lcj3`k(|g3 z88m}aWe6e+sI?kthi2Z{3I15|PSqSoJ_u$hzw6L_=0O5wAUtb@f8w6au$tw(`{Bd1wFwU!fbMuY zkF!5_ZV`JyU?>=2y@NW%XA}X<>W)E?-S7eu69-*h=Nw>ZGmA@eB!4_WBLfIz5(Iw0 z<8L)dC1&Fb95d*qGij@g5<5W;s$?QP^{~wJn;coaEBn?f@aeWSBU@w*N;NFH0BRff! zQ5qiBg1?!B!2S&aV69+Saj-(=EN|oPNoTyubGMXDGGu`!<6V+%Rv)Ep8N8VklQN_| znVhAm7X5Qtk`;wh={^`I%k6P0AR8l~m8a^22PZa^SP!drQUEQvTY>ob8$TtZOBtPD zRgk70>J!2r`!{Rt$ze9U%hk2wljk$ch7R((BH*?~)kT_+^t2}r*QidC%;A2ez$phd zo{LpHt!|3-b6l*)D9hnB&^_k9q!6lP6O3In_^6TG=)-5J0|rz88E}#rY|&dUK-YFz zfiVDe3WGC+$Jr0+Yu9V*Ja^BxnCTwC z20L;t#%r=cR&{Om%<|p>8D1+`8L*pyvWK!1*lQ`2(m!l6w4!fDyh0+<9~hAiA8r(7+P~!obAb+7*NORMGG$XJ$zd$2L*GUBZ3y7Aj3#>g_PZ&| zDPxv4rkH*}D8Mz3(gCHyNgU;q`*+jy#Tx*dIc5bOr}rN|PS>tKo1U3ElRo^}4*(uy z+yEdFr_rFuUa1H=IzEiWBm02xhtjY?{0eYxw;>vGcRzSPJ^k8O)0C(2oD$_>Z1XhaG)f303cb5>n6@6Qr=_Q!Py1%BFhb?k z)!-LHS|SgRqvmw179{l|MmLWFm2k8qe!+?wY>tCv{1`9_0YJIo=@fCIIU=9>DTdm> zMd!%+SoW8m7w6n#96Tf-vM?yU84``Evr`4LF~3uw1qheoF#sn`6Qz?;$ONw{x6ae7 zt`5$uKx7MXWR@rmJmgmkwyKmTEYm8~a=rpM@i&W4)(!zg(R`gHE3;-Q!XOTYgbbpI z6T<0uPWA?CGhV%pKtp*zW?LxqC>hfsfMR8vLo|pSM@ASNd9EBD}&Z=_dW`(pZo z|M<57Um22sC=`2L-rvBWU<3#&&0^avdSFu)poq(oJZG#5_dCMTd1;6&coPR{fHyqB9`LuvMmmASYqJKkACtd83Rjc z=q_Z|4mzeC5x4T;e71!_oB{_|P)p-A{6fkHfewZu0Kdnx=I9;Bqbm*86QSUtvN7$Y zWg`shGWqng@Nk!sb0(d5lEGj$040l0Ka)PiSm$QupfND!4`4VwJ&PTYr%hAd=Ht{n z*y2tAbbvK07aqMX1*3VqjUE9U$)*JM`=xI5xaojS9FGB`048h^YmMoQ{bn%&%vJ!U zNxBR7D<#4M6>P|{liMtv`(tUnD5p{r#eYE!!a?d>)g?2!{@ik1I1H{Le@%cK85!&b z-(n|_2b??`6d+IMJA}Sf)*hzO3pdjV0V88s5aa_4e7vDIslhkvXt^d26;8K=-o*@F zY=GcMTVZ0{Y@A680grS7fxZu6hQ&im;y<3F4;fd6^d~lW$>!B3Xd18A)j{W~R(wdTkyS)iBFN)r$9+!52t7!N)=PXH&?rI#C&rd&#9h422AskF2f#`LG|(2neqOQKBRP?s?8Hb-F`sZ)A&LnZYn}Km)@0nelbTo3mHVV3^slQNfdU>=e%Pvv%buUH{@W z@EW6Th*F%D*2c*UvdAMW%5&Fl@{nsZsNEm~N45oH=E*Wc0PY&fc7!l)ukEGw*enKk zJPpAOES_0P>+numG#$PC_1^@o!xA8iMXbV0ZDaU7 zy3bR%I!LnRbjxl=vPM}A1gX9c?SN;n^ZyIqcsY&XETNvGH;v18Pw38Ikosz5xtJs{diE&} z1t^4r*139&WJGVQNftxrnix^Wx!OJ_2G9u2$V(e0&(H2}->Y%dAjnr!qKx|bEKjm$ zW{Pz(Hi;;3y>-SAb3JY`KF6|}0;w>@brEboKgPcZUNJsq7UkaAIjJ+VRH<|7b;Mdm zB%_D3vuQ1;92mo0i1MI7zLLf};X?pu`ak8T*#{l_ec8 z_ZyrlBIt{k&!E&8NF0#O0iIpH$pIm&54V^D%xQrbzIk|og9Jn!qFtZSQDUAHXg92D zJ^?_V;0*R@BJ?cTMfvh(wagS4saA2Cqwvbq6=@PSTNJ1^bPYh=Bj_$wUi-iQlRt^m zpep=azLnf(OH2kAK_@{bJ@sw`ooZol-1i)f3>BP=B_p5L0IQBM&*6|etaBgE#68sk zH<5L-b9t&RI+_-=h;fWvX>FAaA0QaLl!gJ3pg%bdJ0d#lOhWqxKY5- zwRSPE0JjDXp#&hS(D+%~+fC!sd^R#71;(i_Hj79BF$V1ezo`=FyeG(;a{af$CEisR$hNd$V!KAM#yeT-4U>yXoY? 
zJq$4?7qJKS1>^{VvgpPsM;6bIz{%sZ!YT)IlnysjAJvsMm4FU||G8rr4{*US=o;y8 zwAw-(jTsw}Ra8Mji+BOgCg)a=GrX|K!65()r8UUWn^mkxwatyH-Bp@{IYq^8*@J9 z=ciyf<5L4?CS2g&``ZyhzTPz;Nb~dXM+TJsOlN30Q`q8YGe6%yQYd%{uSb{p6VwAP zE!9fj(C(wuKZxC@0vK5&z($s_V402M^tYaWo>I1-g)KskJh{nin+-z(Ir|_i%e&rI zgK`15unFv?y$?Q@JY2J~jwg(ey8{;0CcGn-NKS{^76`|a8iS9%oh4p{A9{Og~m`wvKV;MB*@&e3>@4FTX<4tENO zw#rPWpH)X!Tlgcn?Pjt0$N;8NIHDTI-YBCwfBm)QDmePciC?{i@-&;c9r z2q33va8!QrL&s!!JvP@V3O(Q5D|aKfQO)ui9u{NpB1UjZkjKv}Bli0o8n^@2n<%^| zF?z^>qunL&;~ZUpcxjZ1k{-aUoU=kHDDz`n6AWlHAJ zKSWjU0RPd%&*$J<*;FsRrAT7IM2@sEGs_0}0`)`iMjqSPt3UuWvCTHy@&G&{c9Q+_ z`LS5MPM&p*6YrKHiDUDihgIlTrGr>~ak_zt9r^iIa;#{Yp_ERM!xt@a{Bwgo0cR7R zV1F^%{M?McoLxE1(Br5sjDe|&RFs`!(qbJ8tg(TPYa+K;J1(YhY^9qo{ir~*W`W)V zDGDYdq=P6k206n7+GCEH)An&+1{*4r4S1@(I_rqheHtXE7d%gt$L|n zj$<=PB$dKFo`U{jbwWF&UsnG;FwAE+xvBn}B#1;D>!wWkEUZFQJDe8L-vc=)0dE4z zV{x>KC=CV)-!#N(i(3VNgGGfm49UC^zO@(uwhz^!AV5NQ39Dr5uM39PBYFi|8`TVDS0t z&rxRa>BU!nE&cWX{k>G$Sml1yRw8^1kmaV1TqFd{W*<+{v0QP)Vh>+` zA&p*-e7w%(p+F~5}Q5f(H2L^C!VHlzjg!{2^ z>l7uhdClL2uyQxt_rlAmiM)CriDUH;gLTE@q{+c2>!T%_1+#7o)HqJ|ZKv zk-qxsbLlHD|9Z@QmKS^L!N<|O(5(%T>|^f%NhoV4bMjFZ*ECiV~o}oYnmu+HQ;Fu}6IQ${_!NV9r=iS;7?ZFy?2`MQC2ISiTS=<+V zRsRd1VfnG$YZw(II=;rn7Y&yb4^KeLu#YSthLP@I%jsoO+Wuay7m6M>G;+ssh{=Bp<~iDoM_PH!_4N(ON){g0d!6t zGJIYS+UP(jX;JB^@iU({Y?V%e?1A$rCuhkNAlEJS!UJkNT}Zz1G-be<{9M}q>5ri* z0Mv)fgb+ZHh446VU@HM`f7-)vn>q9pqyU!~?he=LM{lK*=&*FMcANWmR84rYhD~?c z6r7cYS1Oq6Y|!C4G6fBzjt-)0(jA;GVm3q|8>e>xRN@-QMgR}qui;l84GLUmX-gUO zS7&UdzKVkl8Vc7NCKEM)gHrEh^2wPso0F5f%1t>bL@<{{Cd!}P<>S~i?Y`%3YO4tX ziMoR3)i}Umpx^G8pI6g2=uG(>*G3cI{_>CbBlwi}8+M;+MKtdbNN7_8iJn?!#x+XA z?6Ex+uAPYq0|)ug`XBF4kfKX+DgztD?|cr|s27O(EIn>zFT*_y7tzs`2Jo>?*c5H6 zpu;Ta0Z9zUel4C~NExbYj+UC58b)?-s5XT1l!7hio3^p#24SwRNs=aZOI<7<8nj#a zFv!4e@Vp+>VA*t-k&anT>>8y6_t_`+(;;Z(;o)97dwDMHJ=x;a!nO45i)Ygb4Rl3h z(TNoeDvj!FWY5_DLr*9ZEU+cos8brw?mym1cRt;W{kHlw4#*p&xk;PPrbJe~!~G(N zvG0OnWl|big+SZPB?(kO6B%sYFDE;a9{kJ(TTfwhy=<)V1UYB$OAm-hm_DY?3&MP;1P|< z=$4Jvf;@vp$EJDsLe=Iqp26ly*CNNdnW0rTZL&PzUTi!Q3-y~e=J{-Tvzjx6OrzDD zJj<+k{7eKz07)whk)R@B;*^~Uk|_pPUW2=j47`n$+L)-g0@#&J0}R360RDo1gpV;H zsZX)7y4{pVY~X;+M(ULXkI?o&CKHr#v@)+7;B|g>3+J-C%RwRd0Vnz7IMuQuz~$d1 ze7T0sXVw3Qq2t-gBpmYsUlIe3D6JD@SalrX)KbP?@L(76+63(A;4RvWB+8K!r}HMTiYkRBCcDw0w%v@MQ}G>gJZ z$9%imt%N#-I!y@fI0Af6hAvm1Y(MK$ey)og=pVX{3wK-T))> zUcMapr9idW0861+49DuH9Kb-PW0xpK(taBOHF_Y%;$*QM8hq$Zzxr(I0{racQ~(ts zqXv&WoZ)bTVL(Wp-{1W5SA#ROwelF{=si>ni1*=*aEXE}ADbQM0Fowu?YGnE^irC6 z=GmZdRLMSdxxU;{);$!Y%cg$oKmBg%2gD9h3fg$IO7!twIztwAn3BuKZ~QDRzWh?E zP>Z${+0Vw?q1zyvH-}<9{+pjN7p{r)@3U{3Pct>aZ3Bg$8t1Yl$6HNN~#U~d%M z#VMFE6K(0FW6TVSXy#5*xlSvD9PsI0k2t~w0Wz|aYM^2Aa)LD-iqW1Uzoaab0YqjK zMytGp{bA=(ECH}iOB#a0i3jFl5NjHiPB0!eR2W413c+L2O!ZJed(F zI*6iR95tv2?GX3b2bj#FJSWam<%p`&joZu>JxX7F`C|HS|LgCjr=Pl-E}iF8EzWB{ zEYn0md3J;&R9#As>UErw6^aJRZ^=jC(PGcb?wvo^P+%ODZ5@V9aeV|Jo?K+#-wFJv z^T^&fdm`;da5_r1*2vT5jRHTvw7f_KV3uctlLt5|P9aqSHEuLu@c^ zXn^r!d8Q6z0AyLl%{951zJBR6=oZukk3H>YN=F6~VjGSUJ4X4qKdr29rtkf4|BRbw z?GcU+`V|dgHU#qf0UUj&L^g+iOY1@F!id>bySql_lk7}oA09g(vGn*!G>LT7E!I~) zE9um9y1q>m&A!bq5ocK*@(>4%$cB!Wz=u7b!0`wSZ2GDp8)F>qkfR!M!ah!O>YRr- z&?cOkG)(ofJH5v7%4)N4v7BIX}sZavefPyE%P?3y6fu>kzr`qV@QO)w{ zN;EZOsQA=r-ZJw$!o6xUJZAc2V;$fJc&IQCeQ$@o!l{KGpj*hdLAlfq$02D`GL!86GYKXf41TzpO3e;uk4+~eXQYP(XlH{8@dda^n%{hKC8j1W66j0;sv7j-C z9vkZ+6qX+=B{FL~r&&$|8G*21SDBSRY4`I8fJC~9Ht_Q)CfovGL;8#IrVbK-u;zBW z4-wGI)8xe%K%sZc*K#sWf+AH!_ncazjUPlF6w$@SC2Y5){6RRq)0Ebs zpAZPT);5V%ki{xs)ymH>fJGUPDG~-=e9jSI#Ehq^V3u6DoTMD>36mbEFgpjB=3Y#g z#0V>b@f`MZ2R(Fhf#rSYMR!SXY5Sc^YhOtCab!Xp(3@aFkU?;20;a?mFUKf`e^YMx 
z3d)1R!L{nhp1mn1Bc#lY5j>kQ?NI4*?5euuNI>ndOuq*kD3Qvr*HYky z_?gKfWx=YPdnT|CpUB_Z7!SmVLm*fmtCy$PJ9Q5Y2%YnLLAXcax<&%B4w)A1)-=J2 zAk9f5W{~CKdws8~pQ-_tkEoKF(Jio#Ts>sk#xTcGBDt&${UgY)_a$|~In`#3Cy*PI ziJ*!GP2|h$umQNTV`hCE+D-H&AUoOypI4t?Le%~QtNrsAu0(^GncJ*(1iA3F`gap( zJ-QL}1#p*uT^SY#iyb3C5X`c6-pc^*nczMaXC@=r`1ViVVsZ@Q#JC?RPf|Hdq{JWA z%YdP`|7QR3m)IbKA!&&Q5>D69{y6K}%JwNihOaj}Zenc=IdR5tPeQp7Qa_+}jS?3` zV8hYjL!!-q4al&FHUZd~=mHJ!eGHFg@iSz^EDaZl5T@qmh};;hfnqnHH}vCF>R5uw znW?n?-bWl0JDc|4-b^emAlM!)N9Hbv+v0QXfBYdt_VW?Zl%O|wrnq3mEbJ)8e1fQD zmMCEYAk(I_{3$^3%!{w4x%1c3M{oRjTKVV~B>fO%#;XYK6s{6rjvu8nFTI+M$rNvr z(HOxQ6rYE(zzn^{Cm*Ehr=I{8ahed7FTVB)Be3B0vDnV1z4F3KjI&uzZ@vBoBdvDR z)J0B*yZ1P?3XJuDL%4z=C}$`|!&SOLW_o4sFVn)SUqV?A(^HenY2;Zr$$*rNhxgLP zgLfheJwv9ugTa{?C&GkVb-MZ_)eDqwEhAd)Bigjvq8?y_XqO13wf7l6BZ{AxXYv?I zC@@5jbPZT0{5_k+I;kjjSQ;J1{5wFZ3bu)giy6mR(~E}{ADyhf?`_6@Rc4t8I%VE(_Co) zCge$rE6=9N?=54hTz-yO4x>au$WW{y{@p0=Ga6+jedjm6lD_u!7t-}h^Ee+s75uwA zk?Z6nz>Lg$g)wf88l_Y~QWZxx1n@K~t8uVQYDV7qqrv|})^wUBGP?C0f-{tHnEjNy zS8UTPftr-znV&+0es=L{8b?Q1Xk@@{ZH{XlnMXb`+JM-0Kd&dd8tHj~5uC0)GNMPw z;R#becHwgEGo|4e`hgQ1Y?3=6Ff4HjT$B`byR5GR>7DcdI{#(a7*)l|o!BtK{oJ_l z6spR;v#ySViSAiSlyWwYpv*Wc)X5J9f_=!*-e-^S0hAzd#(fz4T!{>Xn}XqhWI~n@ zJ|HD-D!=SChQX|_y)%Td6JUJ0kg9xcfF(sU$ zRXLPS;gU&%9AH;i3!65(00RSFx52*+Bx2MS4!OYnvk=@La;zS^huRBl8q2}+@;iTq z%*NXKJjh_L`3`+GP{8TQoAfz+U2ne$`EY`QUu~CJ$(ZcGKHD^-eKYXnl7IjLADp#^ z25F*fTFF7>=|CN5aZ*zoUakkp3{m!NlHIea5G8Z=%6qi*THxo=lqiD3;rnw>eDbtL zGt~0Rd*d0=Bcd6r4;`vuO)}^M@Nd-%xvuKmEC6+Ztgas-*x21U$)e+4|u4R=2O9ZFd_&m?*tkh%lN*;5y z#5J*(XP3{!ao8SwbAo(m&uwHBkXdnZU*%fbbD(Y3u4+RB`FSvbO=J_$Jc71HCUb!O za0Hm(B<8fUua2iQqZmz@$f4AOE{(m?Riu3lBj3D)>mc|1ZWIh6uU*|MIChN0P#OJl zV!-zLRw|L08fGMD37_bwDuKLJ;0~v{TJh_z84x0S=$kTR7TgT4d#A0oflPp{u31IZ zX4}BuRIa%XL6dLn0Ywfiw|Hw zJb#zxILkHpt!EhE9on93)9#!p4)#_V;#JOER8Jxf!uBjBCWn zZLGOR4%-mclSg__xMxA+-u5nXLiGWkW{*XI;IV|iu?bQWP+|X%@DrZWW^=#)_T*3f zk~9!A-_i!$Z!`T;#7rhwx$=}WxxO|$Ke)dFE?}Y&^18#GJbn3W_z!u|PW%z58*n<` z+u(~OjP*Mvd19Y~jdasYVc1-OcZ^(_@RJVM&(Ec2SOt=HQ$zN(A&xflZ%-jbFEKLQ zh{mmpbIuY;Qgsgl7akGg(Wzi$C=cZjf_QRvRuY5!EzBAM?u-GBafNoE5S>OeO>}^n zx18PqNH+l$Cpefhm#@+}-7A&asXWTfl;DLNklkfX5PlQ~qf!{B!;r=4rxa~;I5mR< zZ6mkhBlTj7=__O|SgHchluxc{#oH&;Kczlig4TpUqD9CP4QDZtW%! zA-l)*2h$cCmKHCj<#RXE@R@hg?oYoDFR_?j`Sv%+L=jmakj?}>=H3@B-b}mqDN~VU zs_}WtXO<{$ktL#gdw`7Wz#v_o9O?MsTYsIt^Pl{^w6?#U9=`u}I-uF0(?6CjUcZsP z`t?`RKmW)7E72}T0m5mnR(HqRhXlW|%EA{yoB-sBTBC`OwCT}NYCrVz*tV06t^mE^m!D?Xb-A^BW~V1%UO z@;ydJ=x!LoHsBN1QE*h99c;(0-LE<{R2xSSuAc8$XWryz3X;OvVR7W@SZ=ip<2|Yv zD!f1;qfx?GAb8xklo=eNMjcrkaEy=7g(6yhs?+FK0@RL8l98JpNH5%YD*gTc==Y(H zd5dblFf07L;tfzJ6Djy=-g6|sC6e&PHm*w3yTHNEVvhRFO2U!UU2d_~V1$H`gjYwV~ z!YX@oB0o(x;Ew;J|M7na@E-F~0TZsP%IPd#GcHmgsB#xfO-5xfI7$6Py^!maWFiK zoHX2o#In(Ud z#OzF}QW+?*4vx1QB=9yvEKtf%Fol62&Oog|aBEia5N@%9;=4G)tn){aXaBRY%>9vG zA;>VRCAf{s2EW(FDBEr9kWNDIr=b?iMAd~m^X?4pgP)26sG^rcREP}z%zijGRHs+w zz>Pi{TxS;N(HUlya$lC31s!HI1b>e65sb@gv%BB>@g7_Po7U8KIh>wv&jsV?WIx+1 zjZRuBg{jCv%d>pe5LLG(ptry=+gW-foIlFuMVVFLrM>7;9R#vmN7>}_M3ww zX;nZtC|CsKd3vBz1MDr4y5W>gKea#>?U2veCNMJ4(9XFA0`g6Qg(eiG2rOy55|lRZ zpIxTFj4=(NpLMgrQ`(izTIxn|j9{7T7684cWR5#T7&UCvzx~mN1cZDRfTPys$X$Hl z7$=ra0&)c5HV~dt=61}SGUl2JFo0L+ubD@ZJ5n03>B>);=>t~&Jl(7;pyl3o06ANX z^xN4aL(BE&p=t590O20Q6csiAlugKHMFkQC=rJB-P@~#l2Gaw8{9I4KG+-O0x^lQj zK#$%j;~tdYfjkuee2qla0c(`wK&3_Ie?OqAGAQ2_yWu##K6)0cwg}vSYyVTM+=kDgr+Kt zLUC}Pb5!Lj;FA_~NW36xfnVo4lO!w@f$|U~e@=$PUYZ>pz%J;|j?v{vEV0G{-8^8? 
znY4}tRW6l7>4pMfDH9cFR7G^pQhl9DQ&cq2S@q%&aOsR>M_n2$>6@zi@M>A}6JC3a zEZNYGRiZaxA73~@MGc*2eS+?Wp4mI)2r{$7L9ODxtQ0w-w16xOc0H5{9P~?FY;Y>4 z0i~Q}gE7ZdDo6e7c@}vX1A95K$2rWdZRVGB260|>otZaanHC^8hYb^dbonfK#W4Yb z=eG(|1aRBr-ol<}>)P0l8Wrto(SZQDv_TTcfM$?{W*m@ZQi?MfJaEJ5U*d^Kwg9w8 z`79^VNOhEgWTi;Tq;}T^#L>k$begqCKct=6(08;|Pji>x2a6BvQk4&+A#-8aO`aBs zM;ZL%VD!E$krKw!wna+=SRm}384dV5JWvixK!ajBfAbOo*fT`V1(eTbC+Q`cSsuLi z3qULxFm9%wY#Y1DTuz-~In6IErl0-nLz+6SrepY^W|BS4Or+lcU{y-+1t-|7F@zdn zI$Ya^yzkhvV`u?HovqPy2)Fa*wdd2{`LCW%ciwzGz4K@PlB~@ki%aT$?ot4plmYM| z0FFedupo(XPZV|2)r(hC=Ka^xmwxNp2;*cr*nzV`cA$wd*VfjEx(2ILlpXoDDNl^fq}=H--Td#RW5Cno;Keurt;$iW^V4%Qrwyew_;O_q*BHO_3^);^ zz)GxtbPKQ#Pn6;Io}ks@Su>PVAsj1lXfK%xGmHCAFp6ZVxF1L1I66v4XPn}CBT1~O ziM!n3kA8VXh>44sZ9<_`ky%=8T4 z*y+RMbJ<0dJxaGIw8H40xDFYIC^dRrWWy*wN_8pu@~CSv)_ z=pTz~vt1cZQwy$Ulc^Nfo;#nu{_p&HI)kkI;Cufn-GA~iYL5|IIpstl8Vjd%q-4Z0 ztQ!w7Gsz5Ho=Z@*+dM|Wh(=i(6-Z?i48lKelc_W83D94n)LOGDm+pJ{2vk{OD` z-|PdsuGfv*ziO;FxB-Zi@@FqZR%jtH#T>w;sf04SFFc!=0v2ZIL( z9p~JSqmS$ZhTZ4O!pR)Lkq$ABdivZ_`fvWxf1lO>-z5T&M-QRY5dHt@_x=wW={Us) zBl6LQAEk{?J|Y{2jx@Mn}P3kI$wK7znCf0Sx_{e;c4W`KH1e7u+k1vLK~Gc4-R0Z|1Fx zV?1I!S`6_=%H_NZ0LsX)_BY2!J#|p9;c)seQtBk1qZ)b=WsLVD(Y$Zg*H=38`uuyQTatD;=|HVM08mKKxn%kR-k4Q>41>?!w)9`SYpb@rEalh16!rH!^R zy#>4C>!cKUbhfe>=@+hV!7x%mPHXfpOwzR<6Aw5& zO+GUD1{!i{Iz)B^tMXSpP-bCiKEOp&ngweR0T_3yuh-6E>9Id9E6>jQ3fQ$(0`YMh zkpK>6sNJ8KxB%_I13cV^A)1V&%~jxio*eG;98`6u33Qc%BkVf%^%Of}71+TA29C}*_n2a3Li=gpBQ@d( z`>qbM94IN@Gumqi`iMH`31n5Cvr!}1<$b3l^ailA^K%?!45-xiNR>Ng>}ZF@%u(fN zU^il%9XjI#0%X9WY`S>ZIp zU7{(C8{@O^@92uo`{bb=g0IC6~8-ie;*SYTqCUQ+hYzxX6cKTpr zmvzX;NXi`dBR#|G95T&GAUy?GHE=ZR?jAec!JYz>B%V@cFlsjF6DR6u=|-)jqO#I&K_S)V>rLQ^RiXz8S*;=4Ru@mfQa-1; z?nE9buWbSsPY^VdeF%;?+}L8^57%ckL+PSy#6#gR5KuCL7&;r*j8@S9$Urz^#+jW3 zjD%%j_BkR4qwOotKTQU$%%-x502#Aa({Nz-QJ%%cg>?5F78HYi0;kTBsx$NQgYa*- z+-D!&Nquk}i;I^q24q0kfI&tOd2mG*f})DN!c4mQ!gK(H8s*@6p3(~j2D_Fv@2zrx zz!C=&6w`;dKH#kMYw36X!GD+D_~ZXM?dtCtz%4002M$Nkl5=C6Mvtv`O8Hn-q4;z$HF zf`;_7L1s|m@xnsdWnD_At7)6DD><%hrJ@PQE%uMlbO=X| z$gw`QfRkn1&PgZTy8R%%@zZzGeH_pCzxP*UPjG-NQdh^!!+E?c3(ek{!Lp=-vYT08 z-HlvX#wdS!f>V^YSR;+KO+HMLBnstw0Gl8}T@o1Uq`W>=AMJQV`5IyYml=8|7pSwu zlr)5SJTZF0Dtd5%^Rg?*7%xlvTw@D^`{UKMbm!4M_7_esf#6lRtAl{a!I6pdyZ_+7 zOfRiIBC;!`M<3r!KmWlG(%#l?YCO1|vN*tzDFQkSh(Kom8)49LctF65Q8I84AUTt< z$ZXd=Kv|xQx&gG$qkC1RTX^{jhCZXbu^^T@TVy9dFgP@f|K4fZsBQ5E9vHAR0Kd13 zGg-cVDShF~FQmu!AEfmMt5hY{Vzitz37xBRJ)_wYgYD>>VR&Gz#tscOjkNda15RiI z&?AR=jCqMH#xVaTEQ-Lp!~zvrpEir!B?$INPS-w#c~YSKIyrj-fY==J zV1GeVrwd(2XAnIW$c~e540?J&r+UgptH11zz(YVdMwZQeu;I;MxI-}6XOjuYz)tiw z&*Tx^Hj)h}mpI=){13mI-u%()>Gqum>;p6p?z{JeJhGSmTlq0t6FvY(EI1Ps85|pA zTa}al-3wr~SvWvmCs$ZIqS-Y7)am;37@F@|aoRVl%MYXCk#!M`5?2r^)mN}j$M^fvaYX@E#a|DaAEFttM^?#AWI=sQhJ&gcaQ?4DCwSj^Z1*VFQ@r(y||@xJI2l?7z10%gjqxtTS`aXD4u5YRaY=p7*oxX$4N zf*l)!f^W@h%Val4(WN+@jc2Jc{!n=l&{_TQ&?B>IMdl|p@Jk=w-$IX5`qnqUmOlIF z4*rxSL0%`YWoF3x$oSfTTrIPX>_>veC-~_y7tm=T6(($)kWBKh8>|pA0Dy~lg&pA8 zoKLI0m)Gd@lyN}91m$2GA}!quc>9qV16PkpwxeIlR}24K=Cch5oUv*`%8X%55J5JA z8b(kYr)RO|&AOaRR=or}@_khUo1<*fbRNIucBzIPGKu0C&YFTE3BYGO9Og2zf*I_# zBiICdkt9-&IMivJgh_=xsw*2j`#!+^^FASMbOG5B&>3XOPd%aB-ekYcbUWYQ_e66h z@*-s=n)nIQtx_P_;it4Mo)jj1tw{32XYeTK9P3l&nVQH{13*{<&&$)hj0u$?XlW%V!9Hl`(@~ zX+Lt?bW^pO~&0qeA43R_gQA9Sah_R9jcF1V4 zS@<;T_diRMH(#U7gE6BEdl)Couftg98~}_nYBAgP@b)?n1-M|tYW;99`7Ak0o}N_U zl%>0OHXy;*(;nETQo)#=f027!Pwnw}q?;eQYXG8CoCw+iy5tbF|Bt9Q3zGD_((}Ho z%*w2+ec!vPdhbR9Xlw+CAwg0ANRZ%=G)EzYEz!Q2u)<+Q*mu^A6hY}iVTU4QM_3_; z9F`ZYA;n0d#TgA}hM3`iAP9g22sF@*Mz6JY*Os+sRaS20=Q)WU7g+4>s?0zC|NG8& z&U@ao?NBJUa3ekb^7H8)AW3>YPgSo4O%@&1)>?gYaSJFyzijk=4}XDW 
z1nuxlph%wC&=r~;K}OMR!0c)OSbal6h9x|7Ro34yl}r);YaLV+gH4%jHg5`n2j7sN zc|RUxi&|8Kk-ET6YFK3g1!O8d^S5N}o#f=mK?e%(0EC!Spxm0F1wtlPb$5_|T&6j> zhTjmEFElMi_Q|PLh9Kh2{9(grpDQXTkrVTmT6uanHr%UgT|ijtuv%FT>oG+n<=TFl zV9MExuYEl|_VkQ?BTi45^n#vLqM&MS>Qq{~eJgzgE7{91hK&sijO_8pfC0dQmDd(u zwyPyMh_aO`e%s4UGU#Ih;Tk~U-dLDt=_v*QsMP?|!~G+8yY+PA`a=5P+Ku#^4{xNE z`xqW%%aA{?CAuFtuiboPy=Dt|%)M0cMWcMyB&TBtc-Po_zQTC3Z>WthPA!FQIMV7> z&Y8-Lu>S+(PemCO?OunUw;`tvW{{bewJ?9vy{oX7bq;AL4W%_YKYh3Jr?|(MT~U`r|%5lGlFqF@~!rjRRE9 zKk<0F`|J0kb&n|ohM2JsNC@v`I-VU4%%QaXP@YaNI(a)drfu|IHNR|hdKyK$$JE4y z7`<62(QA^uwY6Zj2VDl>w$~*a$hqK7QPQ8QzW6L1Je$9BVghJsn2nwYpwy6Vcz}I= z&f~Z4&ohYLVf(`9M7R$F(*gjkv=u?IMnNa_7=@pQ!SehHdThepVoA9==ab{gxfMJU z^i@vScRQf?c@`7fKU+E3*`SpgdcckVvfY9=9aBSq7Q}X=&#?2zH(^kW{5P+jOJ^_& zfBXIa(H?r(f!6cH8esHvoMmSHGlP*17LbRIHc))7Iy);P7SD~La}C&59XEd#0FR5x zI;uNlXw^$T%LzX>V&qxzOHkbE3c?n&-W<+yg@}OQ#P`L}3!cdi7aBm*Ds(g*`P{~H zb7Ooq<7{a&*FHju3j|RK-FErH96HD~#nEAH+F@?J7uj4xp*g^;bYmxB%Q} z>O^Sv*_csM8!I}2yk54p%)LZsA@HQy00P zo{eae=;tCokBucWPLZwx z+q{cj_8uENJD(D-Q@^^d z*EVtvQBuFiF0U*v^LofA?DrIHVr`~iG)_73+^iEtRBzbkPkp%$TW<@t4jd8zu-!Fu z1p1kRYS$zQn4peZp&YyxR~cb8EY!xKbaEP&o1eq(uuz6yy#aw=^g_eDj-NRj`AH>= z$I8tG!im@|fP;siy9gb=G@rWXF0csD+I_SPqDaC?A6+Pn{SwBG5ab?tj_tJDlKrW% z7_tF}95%z-#GS(%Gi<158lwnN+$t24&INas9?hs8g?X%P=^;$e!}(mROUY+(S0EGTzZ^dzf_w8z)I`4!Ydgb97)%H`F84^Jd>{9SxW6J z=EXk1^DgXdKTPil=?Jt}Sq3m~Qn0YJwvgt=&!vrrcR8dQ81jZ`HFJy9g*^GUb&C5s z2+MY&+*S$6LwSc{qT@M;ko$9UPzVL{BXI`u+}{bI>@sYYxrc2WJQ*lkjILoz=~17|^_ zhTK98EFDZ49xx#q*WSY=b1S<=HNxixP%oT);~oUnIz<-v*n3n(WqwjLtj3FM*nk{E zC5UdeRnv1{e>y$+g~t%mZrBcgPG7qrpBKsn*r807z zqYIL<{6VaYC#lmOj%_nwIk3Pmcg*}wSpwUoz;U+3)A z$EQg6r;XREuckSS=uiLg|C)9P8SUS=PAb{`)OGn1P6RD=4g~+ETX?p}i31080vyff zQW=@Em!BL$X;G9ws7^4Gg9*1OzfN$6?f96c7w3~TH&)xu{?&-|JfNc9mPNj2zjT+n zO`H|js2so}gZ!!+TqoPTHA$`Vc{&;7QACbp=r)iUi&^S*dUV2IG;oqVn+2|CzVi;L zC~YWtZj*1}vub(iNV*hxOG%02mI(O@z|iuVZ&-D`DooM+h{0G1%po~MpW_J%x9 z<_X%TS0+VvmD-yCe4cyHrVQN)x#%*9Hp#c2sF5>)p?zazVwwV|!)E$>fBzrwIpnOu z45L8e+y~g@eo5erJZOti5awW>lyg;<3}@t$qn~Z_7fda`(?H6Ex>_1ujbX??YajQ< zbv39izATM8N=DE2N`hK8l>6ZK{1K@Qo%5L%8dF(b+nCA7TkGL7=13vx zVRDeb_RdF(X$X=+mihp2*+%-41xpsG$@F{Q0(!%eDg*ny2NmGeqGcXQ&EWJpgF8kZTtmQUkDQyL2E56tmm2wa#Y>dW}8|;K&Lf?Nx6t ze{qOk2RyUjW^jZ;R#F=>SpYxJpaT#@4T7hPRB_JDg|=_6eR7)&2U15$*jY$cMhMFm z`l9oT$a+7uLEZFT-rC?fxu+@^$B48};=sUA*q5=%e#XCVvOcUcU`VI4Z;Xyc=8*7p zg52s*X%#Z(aG!E+>m62ul~!xS(3G4~VA zQ!{ye&!lijJYa{UE{HO1y0*$SamdDl|c)MU%)t8ziLi7@cl-?(Mbh(4VHKm=|vWzRtZxmv-QiCAI}%2EFcRdYN2{ zNd?|LI73A`9hpvQOFL682FQUny>4iPcAKns6`3}@#B-~RcM-X@2|zm-(>!G{tG)AS z$%bJ^iMy3ndn0{J88ZnUgTtYA5uh{Hsy9yuZ1KXIVlv9rVCxkd4G0h|FyY2pqk7>V*&~NcoK#`aP{OsNT@+p^ z&0LyFFTVZ~N$&y)7!^YA@YuYUJWz~8Hq%K+n<5K5{B7HgARmAD(?3jC&RhvkoLq>Q zxYFeaUY@IjC*2QNbnK=*eodP=qT@glCh^WvIlXu7div8peT)3n&rtO!a|!|qu=*#g zX@lp;UXOX5{&?mX1Ajlxwivc(#UKOlz=PLUdl> zHJ%ZToj}PDU5)lUD%=9;5HaY26*Ke{VPfuAWs1Jl>L5dg?^Ld=3QEoZ!6EEw)&0ca zZ*5TVt+MBO+w9f0>@S5S4q@@I7b(gqQ#A2_Jk};GH5P`7-ysZQJFgBOJ_+onD``=D~{vUsoZoc)Nmxu|B4SL)U#%9m8~jvdk6;|08pe$Bd!2zio~jJX4~M$^C8?Hc^O#$xx&_oDr4!7Gby|vfAX| z1puHqum?O}9%fVESb?csT_McMXJ?Rmds{2dk@~~)$W9U_!Z@|=Rc#n4M*LgGUy#Q3 z1XJREiP-zvyQN8xh|p^fUUZ31rXi)!kKodzuYiV?g3!4ANHAX>nzP88S_)FYq_wu2}l_61!% zMCz2?LwpYFBeK|}8an2uZ+wzYotaBlo_mY~0(fV8jc{oHHtC|^po$RyjCC@du=F|r zvV4MABPtaRRt_fHA}Wpk%F=@XGe!VyJ*Hgk!WvIaPNWP`ng+Ubl{Acgm}xX}Fv_lh z9gh0Z6FQL=EDw@qB*QAmv-OsFqKdZq$h49{h$BQHZ15h@D#h~D+$7IWJH`9U>^%kM zq&Y4wfC<=hW!N+l9T2-Z3Dnhz0^t*w(#g{^*oGQ?&q;g2X6*7B-;<}N=!pAk^$>2! 
z)UKeBa}uL^c|CQ;e7=I&N=dLcc!j>M`>fB=26ln{&!dCZ$WLw%1#?7j8~0KoTDV_Y z3AxB%^P-nOAu9_=^EmNZRM}n`^629Zz%?a9#<3rLvV-GpF@6~gQibs~Z{9Q4g^STz0@oh+zhkQJ{}b1^tVA6(K!3cP=teLe((#3*JQoLP~;*K4i(~uv%yqd6p6T_i2Q5cWRk@n{S0L>&t1G> z&ipnud}Ml<)R;q}eA}rB5haNATwfaq8O91K4i#$v*W84!7vx9GL2`O)J~s*tw$&5H5hvysr*Oh646p{a$B(Dd^_P+?3A~N)rX3pqRs#5&cD&m`7H1EpsSd+(#OB%X+kTL9GpE!0AAZDeh6`Z2R{NDEiy)h2 zlk1ITLL`j!YZON*9D(8GcjiGgp}B3Db3kZ9p)TN=MSoXrQRS}JC}@-R+9jOP)r%*` z!LY|7(5nRqFnFEh4CevFrXa{Jgp|o%a2Pu|=!c(P!>j8}(@fl1lTE|BXu~VZAmF=n zdbs`GuT%fCFQsMfvkT_8cYyHv$zEDrUPzr3=&aqMjm!MK^xg0McPL5n#9seFS|>%~ zCvW_}sa~q3OHV!%40T`sD2GGg1$dTZnW(rXR}_ndH6ZNG#y1J$yUPU_&oaQ;sV|^U z90%_kLDA5_)Oc1zKpvnR0M8zoD*O22H3C@`kVe)ponT6m%m3|v1`y_V?HB{xp)Lel!(bJtM#Vg(9`dm~Oy0(4#*Ani=M_QB-I12)p_rS8xLBFj1YgTn*zp{v*A8? z2=$cZUyN|DoUup2fbr9oGlIg@$gyVT$t#Y zcK3E_Um^u(YBtqLVHlm8r4SF01-tB7>zkP&4TE%}@?8`WfC5=-D&yV^>l$F!e43@U0Sr`W0q0TKT3QZJ zYk{p3{Ma0Fmnlrdxn|iSm&~F-0isNI*}7YqULJq)K<7|bWT8?44+nU zL}ZoCW6r{ob1SU1E5{dSU1!s8b6qPK9c_S)c1*bAxzt%z7}+ z+7kh?>%wM*0n^~%AM!h{#XZT&xsd7*i{QKkK~x-dq+51LJ7AxgALq&Sqp=QYY;=vO7O&crg$ zb&)lkK}V@m+jUado1R|w(RN#axsN_xOkaNOm2~^|JSkd(tOFneC*SF2764HRojS0K zI#>cQ&yd|QPB?1~?3Z9hd5$7OR!M!S{QG>SE!aj(VrgaJVXU!ET%>bwUqhs_nETuX z0JQy-YiXLC&$15*<0PvC{_V6pBQL(j$$&aDrq394*s{oovnWPzumKVUfOJwAjs+4r zLNS%&W&0>3oL{6xm7mFe%77|A2GZwdrowji4+2y;8|Hil7U47PDQp^M9h=ZcPQJr9 z>e_Vf$*8Zu$uzD=sX;z%iB-eaWC4Xb?>&R0 zyY^K#m4`j&dgevjuRYTS3BWoF0F{6L^qiW;AyX|oX|!T}WkqoXZw;}Abg@D%wqU=# zzB-_fbyS+QC$jq<&s_@Gj1Fiw%pu=ir>!R!8KT33=XDSg#7-inE%c)ox6!R9$c7Wr z%t>~jj>UCN@ypf&QI@y3)x?7xFB+;xc?i36P(!F`Trw+>7a zpiGAoi`eqW{{ha5pw>v--Uhke*g5ZYm((u5#2nsV5PDi>?yVJ7px#OxhvQiRMorYJ`ev_d?3QwGQuYR zHmH1SqXGPLFMkEa1#m&>xX|ba1dTqSPd=9Jkpr2<0If2vQ@XyVm-by4^&B@+MH$=6 zbKFY@f`B z|J~*ScW0=;LP)FRq<0O#GcB=%6fAVoEMln6jBtr=#^=V zSc*qNDKp?}RP;>_vaGm5sv#$R04xSF6lvA2)4)tnN7M0M?i{OC zQT%A5e7m&;IX! 
znr?n_7cj$t=p>BRJr22B+k>VF2@6w%xbQdLR>!Oio$I?Ens@8ETn-+=^irgjjW|J-= zt1uyRPsnm}Z5WBK{I&m{h{&gDlL>c~ySLKf-RqI3CwMTba!jsQKRvpShepx{Yj=V( z5S>z3SF~&Ei~=?V3~j7)7MW4Ta`ecBn*kI7&Ty)WG{i4*mJbPWbyKK1cji)>|KvKx zbSuC~6C`PS8>3Sv=mHB0n^pFR`^W((Jj1qt+M^pv2G8Gd=Udbq^vsOm&~~M*c_u#& zGtLw}SSM`gUV3hP4pa_bripkxq5v3ZLzx0pj#z*K&y4>vU82d}2ax0O1STW&j@-gQ zK8CrG<&lLs#2B?&j*()(8CD)F;0WNzwW4TEK%pLlX*6d~9cc7RBT)pnml^P3h_s6e z>N4RwL)^-k%%TjFd2^0Yc5O{7(=b^n@116G?(LSM&OXNQ%O2AI5l;Yt5Uc=DJWy`T6*U4 z6#x^4mGr4Bj)!66Uh+X5&>^_Bw=_2vwFc%-dS)E9WYhm5tl-+pG6RCJMHqdZ>n@(D zK!Ht-wQ?AR45%G+STxoE#vX^F2YcBD5ORX!Iw=TqqZCrY)(QY4WrFv1cN$&mOmDoQ zLrA}Q&I(7JezBiN16Z#u@+JoWJF>j)0eB`H5eukfdfU;FZ8_x?`DV{$LoF&bZ?+x1 zxkvjyi|&kq>W~a@AIjW=#g%mC{29Oz>?HO&!pTHgZ1JYUW!Ty0GSxw6r;P$R*>c-# z?IRbxkU?UHXtTyT$u=9^^50H)(>bwX-g^`PS;x^&p6eElgAT#!04#FtK#vSN0dj$8wTFUg=A*ncx+~BYm*#FCCXXAO*!vjRE{0tqtL(Xs>fO`K9 z(JDw0-T+_)iqDG9Cf%S>oH(siMlT?J%pKl0Ld}TwIRbkw`~ES?cIt@ zdj569+WEFeu{dah`|N_;F^cK^nDR8n*&o1O*Qk;2M=08^W0+Kf(gk@d{m zwFz<}Ux0OYZLtFr(^IT%jZOqxkUvC!*;|K-9C5}_3vmV%41ul#LFr`|iR*?Qe)RV~*1nKo?Z9yx$im1pT^>l|k>05Mz5B*2pvyLcd z8!f^0kz4GKPFX7w3P?uR*b>iZfMDMVahtTa6C`)CaXskNL!5aB5_RxdMeJmTe_y;b z&8fto*zCHKg^Fn^Tu5+YpS3FOz`~OTP=!~I!PPsgu}R1S3N)KeT|5huBjbv~;^&Td z+aYzq#tH|!$ONprpWH}Kzx;BVxH_0_zV`t=;|%`+AW7309-mF4 zyNju`!?;GhDg>^bv68iXIc-yBINgSKT1247=TbM{HP|L|^|n@*5dxqGf`T+746t83 zOb(+}rT1cvTwSZ|(Y}OxWli{*;FOctl~lemV#ta#L9&0kS!AjRIp$%Nyz3Q?u!@7cUTg15hB8EIq6& zAwWQ-y)TdZJp3^Iw1g{iVD#jMXw73KsNUM*{rlH*5iSB6NUK94zFi!P=i7KT+6#48;UN*BIU5YwhiA zsP=ehES>%4%V}@rKCS=mMyHt4>U=sObwejG!$d~M@E%uj$oPm6*ynEc_=w`CQNTBi zNYi7JQ)#VSN{tN|8l3p{LsB!`cD~(H9c%%tZi5b^#VcJp?W~>CrQ&rwOXQ)VLuA9u zqpxFF>=WsGY$w&t9v{oFVa)Ao>R!uCd7L9v3By)r9|Ur;!LnrnXxv|b984i+DBAU4 zwEdl}e*FA4z^|>T1QjaAn^X}*(oS>_(4Sw z3oznwTXM(GSqc8<#5)?9B0q*u|?Ll554StxVEN%*}vE4h$)O1UdNuw+E*yrAw?Xb zb(FSR-Ra&U(W24@*F&dpe^$j$Ow%@#Za{~eJB_x39Bi1c4-o}Je2~#Ad;aoh*gf(| z8UJ|w+I)KIi%+K0mu3M_-1`Ae9}y7u%i`W0^qj9B0Wb<4Wc-ye+21njB;(~IGC`T$ zKXekzb##+$cjxc{Q-cgY=K$;Lq%tTEOY=+IL^CG2^}})=0#baFpmuP05MXLL1rr(p za+|Q(hP9pKSt1IwN7U0cRNls}`c1bv|s zuH&YCx5#VvV3o8Zx*rHC`CW|TCiO+VV*y_ec6kV=e6vbXDspjxEjFi^LlgD_Q4WVh zVnZyYZFex3Iz=0!EsqEruXmkHBd`G2(Y{8iBHz zEtyWJY`@NW5J$XPhuS{T+E5S-c(IwzlA3uqvUD_Yd?VA!U9Ly&k@({0+U(O zVE(d^UREJ+gKMPS)mc&U;Cj4 zdmri>DU8kXaM+VC=iC>rx|7LpmTQ8?=m|+iO zU132`hQ_AxS_ab}{P@QyH#kiNJ<1bBSYTc+2nL!^EQc69oSLO2 z>P-s2GgTA{9(neU`+)W01l)HvK%jU>Nd5F3JS}JryNjyF> zz(L`eG^B$RN1TvWFgiY+e(zuXLHf!|-$bbv)9?TOo9XZTt$&a{`0y5O=HSc8K~sF{!Pt zh0)E-oF>77ZTJB<9e6U#HI5%pA3w-doPu4d{N8+M4y)o>8U&t6Pp7%EgX zPWKwcrc*dZ3L@vhGCn|oU^hehL!&}pQ*&H7`$IY#6rB#jtW07U`#D34_vp9Gx*AH< zAcQQ(89{-HG`U4=Y%qtFEo z5((yF$`)B18bxc!hBEKpW%||0b_W@-YTnl%cZO&Mw}zGlklur5&|az<(iSY_hkQjJ zUcs`?E!UPc_ORt!Dc3Kf9h@`Oa4%i-;P+cxG$)^wIm*VzQJu{7zbo zMc1j43FrAO+KJJv?gRI!ELmW7%(>DQI4ak?Q}53&3;0GT22*}@x+15O*QisKjWRaR z9>Fq!l0br9hw|v8t*vjkWvm_h#_EXPu55DaV`wIBP`rs zi{aN1R5o1`KXA|J1oymre9DZjv}iE=%a2Tiegkdte7HVcMS<2i?dA2Filg3N%_)dn9Con%K&jf zzHq&a_s+W56W(X`OvjWwY(^XM?QH+tAx{CbmVOK0tF8eurwguEV zbh!}`9U)u5$-KLkJPO|w8Y2(GIAb>5s&NkkfOeUctl)Qu-x|cr zC4@{b3zJQ|j@@8ea)b_$Ft8cI2jwk5vszSrbNcdmgaj`Z&|n^pfWhSAvEk|T;m>}a zPMteX^*04Ddn`5|aY&f7z2KqZ&-Q1Id9*jH zik%ao?D8CdEC7_$Hrg^gm@z=^_A=h&AYpBQu0p?u@#MigWlVG-RDzd%eB*r#f04;- zUq}lJcT+zM$-+J6a-$48Dd76##*H-el~+k$pvg9#_Zr5%fN|YpbBeGeD{uci<;xFX zdcT!62$K{rBJH#|>FO9qu@`7VMu=4-&hD1E-$A^}>e?z@D98aWxSnyc*&Mp)So<(67;BA=2G;(5dlByR z#PVLeT@RRfrx9wzSXwb_IMjnHV=lnbBU4bK#SNaA`B?%oa{&FBVSEj%Kuqu2uNbum zI6k7!as$U{U~DYy;t2WsI+KRF5cUJu(8&uU>Bj1K^34cL*r+8)o*m8Pd>0VO@@AO27OC~eV9eu?l^JA*np z01_3MS}hOqRp{2Z=yl>`@f;dP0lm*1mG{U?okLV{e|WwC2!S93sW1TiT%gfJ-heBi 
zbbt-q0}LY9bJ=X^6P3oiKiY@kDD-fK%#+pwZsGt%DhSUKDIdr;N4urmRA}xI03#Sa zJykwQL*yRt2w`|MP-Q}g2c*HQ!&)41|25>qwcg^~+F;whtLA4`V8uPVhLbb0S~zhk z%K;`6`DiDm!C-g|6Y1se;h7gaf8;9eZLX(*%YcMalPR}EPh;fGqRyid@_;Vrn3#f1 zc@QC}x(&4tx6)R$@;Xkp_cl@ovM=NH)gS!VL_28JbMsnSc;ih{oj!s=N3L)dJ25B? zQiKF(EKVF%*Ip$qxUq$h2wvl;0Xv4h!F%Qmz=E^3-9xlFBIMQ6ZH_M+psaSGS71oS zE?i36pMJ)(!<+z08dMLL==Iy+8FHWw!<=p8=-&D44$h*XPl_jC13EZs8dtk>#1}v< zB3P{R#M}&da17L74+LE*Q-PfK9C@qCJAx4BFVHH$muE4p!~(ftiXGQr)$q*1k+YK7 zkiE0)1+Y7??ix863Y{=nl>rzxnLKmc5US9zd>ry1$D+felgpXr&j7vrOu!@aV&R?P z=Q?YnBWjr2=!F2(>)U?N_d4my6dfIXtN39Jpdvp{0ppyvzW7w+9|4clwnxQr0zrjI zV}GQq@^?;M03V&ZFu>>>Okc>pAX{foP@|I<-4@tujhw}<>g^oH*FVD=ab9&O)im?nB-{o&|H- zqmky?7CuEzD$i;~x{)nYUvxNbvv)N>L5AnrK&SW0uA4{-`} zcxxKA!k_!GTHa#pDs4=%*n}dwr;8|Q7k|%_m-~PMFnIa_1+3N3ze8yge$8=-N zT|V>Z1U%W3buzC*AW-3$(c`_GE*_9Pec6hZ*5B##gU4a1$Y9!PU=`9#ex(8ffWlL(XJgBIW!iL7~~Bp@N$gMsb)1jwdO*f!45yBJX%Eoxub=OgyMD%L}Y zWYQ~~ha!8dp0S7a5a}0H?8W-Q77-9-`xrfP$bE2FcrKsMvmKpnC~Wk1viqi7N}gds zToUdJM;wP^aFny4?%{0dQ2Bg11sOprt=4RB+DpAviGnn{Mp(?&53se^Q?}n{Ib^*} zmFeRC+W6i28sm%6wZk+@*r!-<5Nwd=cSdm3o1L6longs4^UB*G8;SKeIm?_6ZVafL z;U)}^7FY~(NPQr|u+uOiObs}yvtgb6UFq!AD|o*o>(gUye~%CvinT_5U)Lz;pNG17 z@eIFXiFxuY3O+~9&;+?QcWHez_1u?Jy_>n$0FYw@!q7qw4=fn8Q$Rm?Ge;D9*)70{ zD|+($QZuY-I2HuuceuWa@UkfVvx&^xP{NdmhvKwoKqh$vmyYOHo3D|RghAeaxWoXAv#CaCD2xg1 zZH{bM%4-T3y6Wn!0CZJ>ir|&Kh?Z=+o;uO$<$nvmeDY$&hSqh7#2fg zSmD6x1+fwVsG`FHVVMBTzMuE{vzq(M`WErLeP7-DB%aNph%Cm!&(f2$(uQA0aYE!A zMXH3!@vSgp2iFq(RT0`DSi&Ft?hn%c`9J@o*xN(E*TrYfrqfSP(eh?BjhrH7g@Z5v z(Cr3%nB!xGWelxgSsGRv!#?Abd%s-Z zuu^m}F-;4+%g7~80`0DvpXgbmfvh$)530<)gN`_808f;rX$R(2%9eM+kW^rf zWz?e>hMzY`HL;Ikt8yPeK?e?7JdO6!1)xlhD>rBOU8BsK*avPFKcIdw$*Gra4eMNI z_4b|gq+pO|IHCZkMwf)qNpjK}eQEy2uW(Rs63_|6&erP>$2+#WUPjvv*@S5+B;xG#^V2H@xZt!Sbt`kMbWX_ zV&q#W~aDiRn(1B0_c^*3ud$Yewtp`UOckH7E?!!$k&r}~(;4bxU|hm@x(Yh~>~ z7Xxq1ecxvfZCBPoG%HK4X7knu!G0hZ7<}1w?h&K9$p~Wm)_M>$Opy!E?z+DL9C>|o z7!c|3<$h`$%+s^yu}qF252eWGwQ_THChZOE*_9RXY(*tEuk}9%W(cfh2zzk|b1+uek@vbABMJpC}F#t*gj4|!#8+-_#1Bo#Gp5H z26O~l;Zh}xFV2Da@J2WUR;IzpsCm9xDG)rPdhPQy4u^uz^S(S!@og6e&k7(k$JkgB zozghKu8EV}pjKpuK_^H2iKM}L!M@={9k8#%q`b(wdVcNw?majO%-luOR3@iKQ-h+& zrPWRHJBbMlE}u*fUcO2DxvG8=WM&Jimf$(n}Zy+D<9^ z)M%K#<9?dYEqJ!WM;09lQU~l2NxFG2_Gb^NS4=up^W*w&JJ z*jCTRAP#axY{r3cer$PAc4o9j*ESnqsLkpY4cdonj-v zr%s(p%S+4T`r?4_nu<&>xUGZFkLUnpIlYWOp3-wyX%E|#@>+Yp zPY|WBKof(?&pT2d*vjdXO_*j2;u<`=dHS9?pKEW;8VEih$6@FAMA*jKsdfZLeu3 z5x6IEmL9GU%Eg0HX(D8{L9^&BTB>10hXGdE`8%vP)q5k8X^FN=dh?DI+(GH}q3mw{ z>KCc+!j-6IY-f}6y$7_nBD@Vvet_4vT>2=T08>D$zqx#xUW&c+@GPY)?M^xft9HQL z9s3`6%v@xV?wHRlw}R!^Z>I& z>W`M;Lun!H94HP3htS>95Jdhh3c>x;n-`dx>zIKx*2oHyqIC%Z;<|5P?cg zu&Z*>LGyuBj)rng6;Ux5N^_b~RF!kEP1W!#@;P~nsSN@R83kEcTkC1?P35swk?qn< zbulHtwn56DMRCUh4dle!={D|H#(#TlBkfQ$Gk1nCKT(<;olp$p336vdVH71vy-(cdWO&g zc6Ko3#@1n7oN))(*s@P&*(ou&$)SuZ$Ga3?k*31hJ4$rA*q@%baw%PZ=hx^2rc|Me z+F)Rg_?&i(-yWgta0$^%zzp_?=Tr$|WC)VE8zJLPwbFc~3Y9o&vU%%CG*fIM}KX%^}b z0c4~?1-LQ_VO~4y!+*gHxt4qhV#68b-x1;9XZGrJ+ML%__ql%Vg9bZv2J6tk0ctsw z>?-SS-NFI=atjzh`w%H47K$_^RXp4k>lSSX!*bkDy`*Vve!aLZ8ndKY|!0z>6fGr|yKc?3z z4D&HPthvqf;k705HoMcMXD$Gmkwx^@`toi%H8C6drbH$CAbQDgcA4l#h!Jm8&zm!B z&5RKcBX|~)$u!%Y!%jDH$=dE!>I zk7L9dwbBIYL}X-7T`$>mBM5>`$qxGvTijN`XjvO#_X3~6dwH-x>Jv;3=F#mvahfRl6C~!u*z#CiZd?3C$eq&?+qt%mfV$YUmVP=YnICLB&O@9Ti`d#SkQYQT{QM&JePoobIq0SX8~}+> z+s>YlFFi3as(}q)YQ07~a(Wu4Zt>ni*iutPm2E+~GL~V@LTR1FdX))ncDgl)%R8{>JixtS+_Xt9n(U>YcubCbofjZ&x{!4?Y1%KaP)vIAzK z1D5bBzx@rEC_K2EG z5xrX+Vi}Jt#-@M~iV!*XU_!f|9e-ur?aOC%dW%vw>>a)FK=>KiW)@opt;l_sx5=xV zoui8a3OWvm#*Jt5a3k0QbO6}@=&f}Andejc=s3e4mO|jn?aZhcS9dV@WJY2bSEEp7Kbbh^wc7$rCPW*o2X8n} 
zL+bPKKO6#$6yYlbn{yC96VUo^g-_!v%M`%WedaSfto}?8Y`&Mqn*Zba0$iONe%7)( zp+p3|ewM~7?g2Tl{YdnG&o3X!1}4}}KQWO(zx%w2N5BISaE#3S(lFm1d};~SOV z{7{F`%5r|-vZ1!wGb~CJ{dtnGAj5W`yO!pJ^lA&|o%%4-Xy+*L{ z{LIfo1jBg95(vBnJi!=ZH0_b->jebnWXZ^Xk@Hu>i#2Cg@E$Go0QohP(GCV)IV;lz zBm?uia_!^v3k3+Wz=D_sl++GJr9NZX)Z;Dce7sis!D<3aAC*KxMEdgK1#zK`Jv@upQ^OM7M_n zn93~G$-DFS(%{Todf^8@NT~G@rnKT~5I)PJU#6L;b3`$f^q4~gOfxYpLC4v_ z3;nQFWf?3C$YEGQ<(kx?-eDLL!kQQx3pUy)jJ2?FYWfsvF`>*cM95o(KW&egu@FU8 z+HfN0#bO8~A#!RnrFG1{P2N*f`%6qT{lObu-1O%yqZckIj{@Vgb_U(kf=UA@_da!}M;RYfq*ATV& zSzKG78z02GBPGFS^^4!>r0Y1^^xpe;l$sC|A(3f6)BBIq5Ooj>6;1U4RJP1=4b?^Q z8s2ZAm%1+4Y3|vlHDyE)>sLz`@*K#%`pTSRU!+d+JbAtw>Z6E#25@5=`IEJO_2Q2h(hRh8u5mhv*C0nfn;cLuscUVpdk|PJe$imo}s$!U3 zHqpq9A#CL|8s8(sx*HA~5Eu;beGKqykWw&!W7E$)s?RdT!SwI`{k!SeFMW}Kq-le+ z`@+%NW@NfS=o=s32!=}L-U@mPO|6HK`4&u=zr8?BOcW#HKnw3*fn))PIS3i{%QMl-S$0CE1$CW(TlJm}ku?wkclD|5FKu|NMgQ!0!O*av zadkBe59&msA`6yf@v?;eG8U`xKv!RU3NY!SatKk($81O+#->C_#YXQ>eCfqd0=t`Q z0fP7FYwIMGd!OA;eG}*DXy9CN3k$d|hk6$lNpSlpiCd+qka~Dy4mAR)XJW3UV8bx9 zESHR^%B>5wsELxO5sGMV_|U!)_Nj9)u|_CTrgH#>4wr7Fr@#9>^8FA*hZ}HDtILGg z36tD->wUUA;B^!*=uF|s;j!D`_#U3IOkZ{7R{HAKU%|t!rg#4AO~R?QG;?7NAl!?B zk`--BH*YR*T?{cwv7K;X4dH9Tm{ghec7kz{t~6J&rs9Q_JJ|$u;~}<_$B1_2&~RUb z?DZ%$pmGd(-e-teXvD*4E8^Q0E>iWun(!M9NU({lkuMeSX$%Cn8eQKj@G|7bkK>^D zdx4m_No+SiHw1?9MrlX;E9Ak0<253gTm{5^$rM>VtPwTD6Rafn9OF)@#BI_VZszKA zdgfa%z$}vIO-|qdjonW$ZhSl+gmJ`R2$75q^bJvz!-I9Bkn90zDyX@(hD8JyvU=UU zr_#0S8|i=lC;us}Y~v;NPSRcm$APfhF$P=4uAS@Gh&tHAR1G0m@DK>9Re-@9@p%?D zSdij#ShS;2X!HFM_E-3ME$hhJkW-Z?Yr;?2!BFJpBE?!GlZFYSum*5qcveQsV!*#B z4r09whnZ3nBUIT7A%{vk67jt-Yp*xzJ!;Iw*s@tN_bMw3?Nl%VY|{#!ef1^!B6Ai{ zdK%ng6p7(OXVE()uE9QTt>RFUxW9S#Zki%ZXJv!59MTcIb{|oWo$EJoOklugNwqh%~2RfaR9H+A+tN5-T=!16#8*8kjw(kX8qw317j$ZnB*ELG_(0@Mc zyS$U0dFGk8ueJL;gJ+AJ?;=hsn*{)c4dS{2I+bcC3cQ`dwygSwz0-kg=+vqkJzIPU zN(Ebj;XDdfM$LbFew3YHBzUcRsc|sfX!e<>Q;p|n&>6rKt_+Ty;X;Hm43-^kBBzg^ zdFE^0`0Nh!XHH`ezzCh?hw!}KXMA~p{kz{-aSJEWVMPu%tu5FxCWt(-dud>FU7svUk2_n!#8a2~2{IdST zefW#}@|x(J!P9^XN|PTycRr1FlQPHl9B~h(?|H8}1om(3<~fWg*b>h|Wkao83}dNd zkBIRa(>N@-FwawPDBu-9bkQxrHMP*L1O26*jC@9xtOs_=QLXGK&OGaYZtdpUc72-$ zfSi!>cIW9ybfI(du5G{t~{PL{R+xXSaM-@NyJnjsH%?(!K%tv(DOh6M8{ zIOu?59blPUL;8k|_jV5gK>GLSF$<$CkQ6Y=s48;q!|d6)7~oSh6_2x3P@VuM_UP{B znUuXCJ6I=EH3U6&Y>*W;Z`KG|A1MQp24Voy0k_CgEuu%9R%b8bO>&6$_|rlreN)pB z98XUrahjYxh2Dahi--z)kBv2oV)Q~sPe##om5mkX+CEPkX; zu}=cS$mZ%4 z)Q$!?G^hL2nQ3%sh8CzpJTvDR%rrJb1e~>AT3Tb>h=gJTbxLhdX1bolm&JfF0$QgB zJz)2Wkx~1m6P?0=R)?v3WH%qxK&S_FNOg?RSOJ!uA}TYkCa{>$_1nSuGq>8Fw-aDI z^@Z%Zx!tD9bPkWD4fJkA`jGPu>W##P$JBdh%XBs+%xn{8?_lkLj*hai369G0oSW;L zIEy%I!3aZ2@Ek^u7T^C(>KGk`0pBHEZj8002$`q@7sZAnm&lj|fcT&nAa321gpR{i zOzkk`qmz91(J2^g0ItrE&apCR)XG6C7B^=Qq<@0oVP%I+MXF%tjHjZ>HmY9W-) z@P?NLryQj#&s?D`2yIH%*AW8ysg~kFdr_J(defQu+euK{xrt_2_7CLeLzbouHV5d?Y)(?2-6<7 zBlsBg3<^N6ABe97q&NW&^Wm-ZrPp3fL#L+Gg>&cloJ#uS{rBl(xIp`vGLxuMx^sJl z99)<-dKJ2;2Rx^vjaBj?!6GPx7*vVkqbO_n9-D(##z8g*&k9aiYXOMBDZY6I!*2Fo z&2ZSzRbeQ@=%Pr3PkdPDdRaq_i;R%0Q*>G2H97?v7Zxszw8~r~BB(M)G1?&^u)fA? 
z_#p~eT$%SP(EdXZ9)h)D_kObqxMd_TgCv>& z)H?U?vtS7$m9QT`*PsQ}2?kXo@0Za5oq_0G%UbG*8wQQW?LI39Lzcs5)&X$xoJ_U% zdc@l3;d*#n0>4ZFcmzfp9Cn##_cLCju>;g-+zz*)gYrBu;Sp9`l z!sY~n!#xFW{nL)suNB&S7BNyT9`PkaPZAu@LW!k%CFuhda;;Jzw&a*Qh_U5L6u#q1A_Bh zv&1>A(b6o3!P!q?!;E&L z@ON#bc{FKL*5Th-M*)S3H_imF3#agN*%D}ISUiIQTld#A70>OX{=G^ex0|{qCh4fL zL;m_ybce87xdUZv4qq5qbe>-tE7@ef1OR1>fED%14F8L7qXb?*3e#8*8L`%I6Yc^% z5@CM#L%pFKa!&j?f~25?)0xG&i`;zV{eTvQ87k)+?2CmVrbjecf7zjiXN>E&V<7qX z)(xNsyb&G0$W7<(%I2d2Ta7(egm>VZduZh*^B(nzEDzx31Tc=s@mHRk*Z2(n>3MKJ zJUg)tETH*`o|i`eL?+V6lHqmNJZcrVd{AWWBl27$?{f4sx%TYkr8BgWq!7&T^gbic zb?^m?=A$}P!)abJyhT7}7k^3u2*m|TVMDmDZF=Y`59ToXQC76a-l8K8ssFH*Vwte6 zOl)*oVGRXIt`X(~c_OvX5HFy15Cc7g;hsHvA^rOwy_>p*M(B7nOPbIY@7G4~YygBl zY8o&_;FN~U9(^5lA3^d&en zB}=)HeUk{W*e}|q@iSRZ2bTzx9iku$E4!;4nkuKg3LAs0TEOO)ph_pfdy)O#=gb}= zyAB<)b>7w*=O2A0OFKL|#5I{H#&x+Q*QTF8XCm@Fu?M!$D`WF@_;i50rZ%C~R~$KF zU6tn}p1~+$klyjKS&7)|mX%H$KP zQ%(CZ(iFM2?6eM*^+ltA{$beYb-GWOYM{;DSX>7D52iDZT_o*bGi;7^H9bsE8({48 z2u_Sg#%XSOY7QhrB&(brrgQ62HPvN(oxbQq;jrAW#gH3i><{%2Iv$(!3zwmn;XP5u zbAP65%DNlracr}CLUz}EH<~S^`HUUX zCuH4y{s)j04bsl`#AQ6MgLvw&zdD*8hJDlIjF!Z@wb=h%^p<4IA^OwSf9{p(c@^~* z1glOzm!f+^cUrjiaVpSpArCnvcIg8f+SV^RNU?A-+|gEzd(l6Nc5>hw?@=3S%7lBS zN3X0HA@Zynb+JB1nKb1ho5nBxu&F}R@NKyvw$ z^x|u;=%8VksGLIpGgR51u=oPj8y{RxkN?)!s1_XnjZ-1W-5qQ(q7cx#ji;o$HWVRx&(ucy(#+X%rltw-WyK^FVB@fuj zLbN2~Lv4D^=d*~NNYhBBoXYoarN>`;m4s~sg#0XA@37DLI%6iU-vO8sZlS|OJE;q8 z?dCS2&=B%VSD#Lqt>tv#u^IA|5E{}R`Y;xH>}OA3jQr5G<<0a5fAAK0LEYhLLApm} zWCMdt2{up0eW@k=;{F1dJh&A_It>7Kv?D?J+bn-XFIb#!5yOx!^mP z7uMTWNWRa{GLMkU1(?rK3|2kfN~bQ5rl($iD$TDg08BB`V38h#xs-o2c z_h}4OqP3&V*bdW>iZswyh|Bae?jO34-u>l5`se?GRFOkiD2g)UfzhV&YHof=k)97qrBabyOl_BF{=Kjq)d+9shOeGHU9tZtUrNtuK4kt#OqD1SgLsi2_ zSZ(9RXX%O8Urxte009*G9;~m-%+bOU0Fo4?sna1lEGqZQIs$;!K6pR%PtT-7>|6Zs-ogX=Uy`3jLFpcOw{7IsefIX-=nsZY(8=Khg-&dgFfQxI zbirPjx+AKm^TQJWTI84OG}vbr!k8D@cL(+ZClVIdNk-;Ynm4{mo3!U&_;LWu4W7Np z8LqG&Yyg|##=-#boED~>;8fI6;uf@-uNX>{J&%w)I?8?VJPA%U5*~rb<7VwVV;0rA zkG3U~wUueT@WKn^tKq}#37@CSaJ_l z=KNkM(y=pd+}44zVvc3jagy!N@N?zTd_)ura?E+w(crv!<}1jt! z2~Su`7+*dQ+2sbY4FYo3$&KV*b*2?%BL8I@rEG z?jODM<*z-D+?YoXaHF?077GW@`)vXB0SxURIwCqZ0J_u_Mu$Adruo@F-P#t}{|=OZ zT~6$4yl))}jH2MYS|Nl?9GV(k zj~}PM`n%sx{UhXX^ByMx3Ll+1=-&CQtf=4Yr|K|* zErquK!TKfQ7h-yD~9Xp*HTVUC9+>RteqjZO0J$Vcwt`0RI zssWqm6gAU+EEY40DcCnHL`TB@+ZI}y?jSIVR2`m2Me0uW4MBJy+q$2QrnM!RLu5P? 
zV{;R%6K9C~5cn1u!efqWgMG6IRGF**qRQZ%GOV(oP~hl|=g}v_Q{$hvITh4X5Y9dy zz`jaO?SN6lEnTpiolJ5Ql*>H#_7REXtUL8cVNEuA@|+Q=a^Qbep1Kl9JShv(e(q+ z+s6prRuQY|gw`dH&(3+)_Gs4ym1TgdcF?pMWulMzj`_PQbW|OWl&5Hc$xZnARh+yn z?11SjN9YX&Pp8j`pUO$hQqX~A@9YKdmyW7{RGI3A>FqK7-iHtEL-84exv@=+37R7&p9>K)X%l0E{_ppZl1GO@SLXUD?bbg zKreVu*&cu;3Ju2Ogf0eOd*wAe8d?h7xgIUCrq0i$*~c&9g_I~lA$NucDf9Q4sVS2A z@1xL*shj**KL}?TubFT&n|p=>`|dj*r|i`E)ZRaiVJU`3qY{g1Og5&CMN`rEUOgT| z7EURNYCR68`C;6Ccr{Wi1AdR75Vqxr|+jpa!dx# zp2e#%cZ60Zd@C4(Jws{!&V4BL82~Po+bE-Bs(H8R)!C%q>Y$aGdk@p|uY4^kItK~i z4!|0jA7i+DVseVj$uhZWITh*Zkioz<^|qM))Hit=kBBg^3Lb@dfG^QlGDC4h~~%HQp`iEV)nUZpJbOb3bG#N z=dh(=G~zYvD~o^ybPfNf_k6)Ju15SWw6BTjbT04%Y=0e5+BFkTf&^9 zDDV()GWYasI{RB^(kjefdlyCwh1Q@2T^`T2Ksd?DX(yz~hMFJLLH-s`!Tqrc7!sBV zpg|rKAgNj%PCx$92aMWea7X({DpLt##aN#^F+ZG&)m+sb#vV^`K38J zPnf4w#koP_$NC8C&B3zOnc+7NhsKke;<7C^Rk?P3JK1ACFbs<8cFgmu*nF*PVTh|v zp63vbq3a=g>hBFrX^aF?Dig17p1W1ovLZSpuD!-x@7#R~=Y-Gqx-EHV?TyA>!ygJB z#i${;`tn!O8fT&nW=eK~qZ_S(WK+yfH7|Xis6!6py?N_ay6_S$_|`VL2Fwida!d!3 z(r0&2#u)yoS(K%@Zn9AblAvH|F-@Min3@!F3ZyzYM>VF|WC4(o*Xdrdt`#~SbPi3X zfpd?8E66=09S3z5x#`_F$(40LHS%9V0Rn~65K5@B48x03+lCz>X)@K(5&N(h>!bl4 zphyqs?R&s<#xH#J>*={~zm}G6-$StyLT3F zFZzHu0{ba2MbNP!V+(9w&N?0zEKEHbjlwNNgN?G3u8AQSwq zNwiBm?7k>70g~K*6fr-HbEG`*>-e_@LRw$GkjpD?WD`{YMGS2_5Oc#1lhe+A* z+-h)TGzBoSYXSvjNTcrzdDB$x!)a!(40C7s9MziB z<}9j-?iH>#7nAwXcIZ^fGI?IyBqKA?h7b9y(fhfLjuVxmy>y;I4!x|e#<&Za-hx@| z0+7hwcW@qh_}w&NYKvM89mhZj&YXp~uAeO=?a?my9w6LrL7(7Yc*G*08mR%a$K<{= zzMXXW(7>9C)x+OMCm4MT*vKNdqsaVe!mg9UqqNL)vf9V#)4NOQ>KCq%PrRYukB&f> zmMC=NOf)K)^y~LNph$;;8(1+xga9XtPO=zIP%F_PIkbf%tE_7idjYJIgoce=$yPUH z3yCm=^Tpr$xt?L{RSL#+z>P+!lkJ>iapV@otJ(qCSlQi(G_h}e|>4&6|Sc`MU=jgnMX&h;6%8QzQA6>aeI>RwK$Kt3S+OXOI zBOGO3+(T4BJSRf{Gqobm;xo6QEdu`je!3lb@v$&=~ z7=1KarT(x{fE^j^T5uwEc|z1Ye1~Z40X{wauE{O~j6fV|27R zhE0|c--orf=+_|}%AjrWoY-p$;~^108f~SuPp*>=av|)oXWD@}50+Ob3>{$D6nXBv zR`$3`mVt$gVkM(54*oKoK{+#T+D;@6XR#hiht94U-mlJo4@K|U?Bsv)JlHP41TD;B?4u6s8ky(7TI}p&FT=MQ9!+ z6QL9YvNbUTMa^N%k$YstU@#^aU3)v&x~EP;IQkLr4O_v_Mu>k_Zhw%je)}6JZ{Ej_ zbifK}T>L&HjI>6$z`VXNHV8*MMGia6w(q%kE^U4EZu-I(&H(7P(h&^g)&q?A@m@ML zIfHR=VoW2w@spoXEJqFw6^b1kzN6Je3@|#FaPJYyDGT#qL7zoMc@TYd`7`WBmgzWn zxE>;fltZq)LG!Z*<_6-S!`ju!iM6+_;X6Y{Y5)&hp#wt|>@91>mpP|~KBC(Iui^3t z4uZ-4=GZGilZWk5dV`@Ym7&H$yVXD_WLtGQG$tB!L7Dk>)+4Db&8=~dqOgGb!~A2k zRBi|9!k5mc@hjtLn^DURm^k}?_Hob!|F+TTg+a*~JwU4;?!rLf9jb_ITT%hU*&}zL zca*T-T)KX}oPO`$ybG&K{&RkWHRJ~Pd>#u15vGX)f%M1E{GnbnTg^jo&`|`2@iMgY zX#=bj$u;s6wquZeiQ#w)2FwT5`1&1fUwE!Asyj6x_CWMc0TVB#?=8>ho4G+|u6x3oWyxLI`t*}DbonB( zmeSPhG@yi#BBM+97M2JF0WdlTcppX{g?E6HIe{_UUzi8n!SqsWP-o9F0L>iEpVOI6 zc$Vq`qj_Oc^F)EHKtAGq*?sca2p>0h)|j@oka9S}Sj3bW9OIkMVD&mV<3}qGn80{C zRdAB4oY&CMJOdX`J1{2xdv|^T&^n&Zzw|6Q_unEaLp~X~^pUE<`OI+kZEI+wbA*y- zhH+>A2HE2iLW+(QK4#x#tW}ILJcYw0+p~E47L3;<(WZ5t#gqv5l`r&5la_D=B39Xb zT1ue<8#v7zqd0Sh$rS+sf~i1qXs4uPN}5 z@i&C&RSfqD`cVbkw!jcZ2LK{?wk?_K<@?%TP}*^#5GbN%+vrmaJNq4bp9>^^ zaChj3cGks6jTQKLl3eqwk88=QqwCN;d>$)9__!6t<8igfoHY!Wd&xOs75NPGyYJ)e zSO#e-mkOj>ms5hz|~3N=TRKT{kzXmu!e1MxQP9lFVp$pv5Tjv45wfb zAUTFJq>(p&UH#<%n*kh$F4n9m&;s0cL23l6%lbB1AB(NJAUOn5u6YrlWF+MD*f{BK zPJ`U#v#9w1jLH^vL+F%o;B5BaLB!$|3@c?gX=ZdRO%0ErYjyrd(*N=g|8;u(zxXd{ zpEOK#(A-z{cn9Vcx$J`7El@1|!8^Z6GZZxmzREiQOdOOdK+f^Hwh|NEhV5XF$LMJ~ zGBE+jo(KnLVrH7|E3&>sIdRHaS@bfj9ZaZahph?b(7e#WAzBimi;Xhcy+%9S)lKaI zx<&hI9;l$voKFk71WtI{0YIZqIX|{B_U=nUqBWB^e5;vW}lxve}!}udSRnGiu74O zp_sWwfx$5;gcCF8kf#x%!KOE4Qir;^&(j(=WbsYqN_tO^6e!fNYusz_{Z{24S=wX_nD6NBow6j5n2-lCZzfVeJ zmgk}8vSjg_LozHL?Zn9l_LQzVecY%0x%+zt)2$D#1(9bB6|OP& z0Y;+EArGlhBw9X9ifaLz(+l`5a-Q3;Q5n)vy6`ZJYB)zh^7Di|?;Z~LD9&a(#N#0N 
[GIT binary patch data omitted: base85-encoded contents of the binary image assets (PNG figures) added by this commit; not human-readable and left out of the rendered patch text]
zFUUK8>9NR5@Mqyp`Z z4ps?kbAX)5?X$=ns6@Og!FcrXP|@mv!-Ay9R!#fMmIyUM7&X(D2-aL2@St)569)N`XB?Gy$J3K9Jx54kfLunHPC7tXxO7B{!}{tP=_`!og+Zw4G@RasA;;)p^s6MkTRCr4 zr)<1ad1AyV2SrDt)#Q-y++(K+O&$R}P{upt5AJdgIRN|@{=wf#KLn6&%*?U^$O`?3 za{zs3bXN(#89`9?47ZpY3n7yFNuBuIPrs7B`u~0<-F)|Ydi3HM(s#IL9GrcaAj41H zFia)dpcz_}ftk5~BQ20X-%0zsgLRyu6T9mpG_O&p#Jb%-%UUB^IuFaUOq=2(@y%zul=j^ z+{>QH<{!MuK!lw%MF)g0{m=hPdiqnJW5n?&YtHZja&6cC zd^bG{%RlhhpGY@vU5_#0GDj!u^EL&X-L#Av`?;S>M;O!`dz)jDP4x~_#gN$s`SlyH za~T{M&qj4)iE+jl3ku#^8KMcW(2D>%TbK1weKNaQx(x=h7?BUrBi!=#@plGD=(UF!03V>BhV7 zGvy61eC2#Pm|aLUWL6o{xC$J4sc_dB%%clB(H-4C3jcbBkIAz(_v_H-Ago_OPOB(^ z?h!iAa4-)ymr$DIGXpwmdqhkCKK-1d1G-G?6Mf2|syt_=nOJ<3LmrE;&`oqHZYqWy zW59ZOWP>SUjm*#7M~^s@egL4n%|2_q%x6DFw+M{Q!L=#BZT(K4znE^m{bqWS{qCdh za+U_9WdOZ+;smg64yAQ?d4yeHa`(=|w6RJ$%UCbti;-)7rn4TPko&U}%)~j)I`_7- zOd%PJ<K|}JfFP{r9`{$!=)*8`%A+7o7QO@k5%fZ*^D2Q0&!h1} z*En5EeP9P388j7pvw<_DepF$)Ug#2StNTcxuEKA0kW?V{G!A8Kv-I$Hl%a# zfAKRrpP4^yh&IQWaGG5^Zx)81d+#GPY_GaS*A~-^bl|XwfCMLY!FZ(MiHRtNT3bMm z19Z%V?L@!qF4FOV=dXrif!*S7oq=8)ZS%G5h~Qu)Z_9PDps&+t7whSDEFkRkzF9|8 z3--22fnc4tVTa3np9Ls`)LT?U8$8wBQK2 zkewGZ*c-TSXc&hVMv~u~_bYJEGUnEI2}VsZ$>Q9Y_bzy~RhUsDR*?l{tp&dPiVp}( z;=kLe0j@RQ_(JV0_izX(HcjFfa!C2mK@k`Wwp@@rMdl}_O7NZ-=_}X^K!&E_v-5o4 z9z)Dpam`?6`P*K}vY19-nyj1kO}6vn)e%B3AR7+CX|Zn&_7WbQ`{oa?kCYJyL3lgL z5YJcOelo~)6xK03$lJ?t4&19q<bCF{QFZH>f&#SGNCz=kdI$(@I_+%%o1S zrI~vmelkNKzr%-!g7*pKTU;hctCEN5h8UtQvgnR;lv(7R!Cr0=&g$Xk>wtz5%v^(; z@`8TIQOiDoZKb2Jv9Yx5Ymto=e11azi{M>oy;r@lEU%3{Lufisnuhfbd0$j3^1Xd% zWe;K)fSv{Wa)e+ziq1ZQbwH_I1P~4O69vd-O0$N@ zP*x<|d_wUB+5%=nV9h38xRiEj6kY}d_2Ow4@iMZ@j{wE69D1QVi%M1*WH&=bG@xxj z$~|zHWjbAxwpPZIL!sGcLpf8p1bn(ZvO1qAaefmA)DV!lNVa0pOLAg~;d0kaFVe86 z`7sV-@NE0pM^v>x_0*?$ogq}1J^*`lM*v451vzdfIVy)SwvLHCdzzvndfgtgHb4B~ zy8-k%*#L{dWRuOiHua=VIHASQ%xj9aBHUAkb-wrhyXnq*@6gKXN!m$Wq?&plEx{bE zEpG!*X{{#6$CJ00==a}$pCTtlU*gT#jJtyIu?o9FxPOcM-cHgetnzN4{QF^0%P@jE zGrh=wqi-7+cYFV?1NKabcsTpU>(b;g3mLDehDRR;nw)n6!LWhQ}-aPkSCf& zdF^b#<|$V?X2|F`AoAv|2kFkW>x5v(&<(KK_a3rWR9iD7j z>GW^{P?=jH-J=%|n+_&5(*{VDXuwSDvqs14>z=mC+%VRHchPZzf(6cc9iwYA_9Gan zDraPbE)WHb?$R=?%DVTX!k*M=(p><}I8uEuSf>eV|E=dQ(gzzBfotsM=Yt*DTir?z zzWJS0B(L<4^;KUeOEQ5yuynl`<_*#yx{xFDTDv%Z1tJ^hNWR>E9DDv$0ihQB*#A<6 z*#X0*-Z!0~J;(uPi?OZ}CtduXmDy>MKF;WcrCalyEovVCG&-CoIPje#1MC?9i1n7q z3WLj0w{XJKcmTV%ghQiV*v1*Mde}6C=v&EomqA=zUXQS65s;H1dUN5*l{9nby>xMk zR$#p_ZRp@JK69IMh<0;9~PvmEl^V2}?Ht7&z zs+l%73#lRFt&_D00ii&TGs@XQDnX~$*rD5uBgrL>$o7n)56@ z_vOHOx*uJS>!Pb7r`8`s{0Wd|Q5wGXfA>gu(;xF_RN`%A1 zzg-LNzs0_Tl;ht6}I>mlOz9&{;qy#!mgDDb9X z>;a-45^mI6bv$l9OK}9@;2w%)_L15ug0jsf9OGfAfDCWeDEu*0T_AMRu}OtI&mxU) zKCBC9;TT6Y?pNR%N%(B6VHYWRTXx7kn_H#u+KR=TKGPA*<@K3bux*B1{i8tnyS*Kr zorCU1xrQ>ehCj?hu?js82(z$7(9I{s;XWqZ`S_L7Fh%rBMi82;YcM6;kGZlcuik#r z>^b~H1H&jJ){*u7&bMBR{ZJ9fsx&x=R!7=>!Ek`d^a`Je<1vJS&K({WThA;%xR)N? 
zy1_K6(=d6DrRm2B_X7U*bu3tep0ROqaoJCd$R77&*Myb3^Dz%Ayo@o z%KT9QHa$crU=5{(I5qY&2RN*dy0E&ikxswxG+=opE!@0?!-Fvc6zE;okufWzhw(x; zNmqH{)fdwbe(*-*hmH-=UJ4*S33Iu&M0yJMH$<3Lc3$z$gID=RR(BY+c9Pz;;mB@Y3@Pe@zJAlEBPi&1o#gQY2vBV9tyYW4Lrw>gt4 z2dq8MoTas!mF2zc(atU%4A|!)1uI3Ac`v$PKerX>5sk>N8l_;2LOT?=`)PWLd3Q#E zJPRE-s#!WQoOG{|JHL^xfAD}2vcpj<+eJ_0E@ThU7SdnH`8u*hzji5`VrkbO7uhBX5`2ol(u85q6>y6P64KOndWyr zJvM#{IWs2>V@^cGp%irhULFT`eTl9(fRzUNs-LLCID=PYq>SXLr;N;Z6OqWHYu45Y zU$bxO5Ty$J1ci39IHog8%iLm|v?hN}V1SiJ>o^R5j}djr z^S$%R5N#+oipmHmDD<@d(=(#gg2+X=MWV9c*HQJW3;jgUFLn3++aCKlL0qen#we z(N}C0`)a740%2sM!M$}NKjye{lz2Z_2mNm!-2iKXNng!#vXSxJl0HV3Ysjw-RTyr8 zm;3G>`yN3PYt(i&_&IW*kQqVnijmjL`(^T>l(;sZ$Ia zGLT)(aAMkyOW1I(62v{WF+eo-)}Gi1Qry4YYvT6M)fx0k5dyHz!8O0yy*dQ^>9ARx zpCJ7>1pE(K@V-Nyxh{6rsHACc$E@Lz_w1r^ZO}J%i0{!C0d|!U0B{VXzyXP$BV#$K zQa;lOcYdx+E6r`=XPMD)pMv}4^+pgvBe8ba*FfCf3*W|P`Z}=&>riH2Oc62R&$<#L zyRNEO!0u5+a*wrr?q6My37C%DYxI&tY2?^*uC(Vx$dIU!fV8ui)Xo+KY@lk2@ zn_SmObK&2nONbG5E+iQaAz$+XM&zlEZtCG@4w=c>%NhNk{f_bK++R%kLzje8;e#Q& z{$~fK{JYfgga zt7tT?c{WpqD5QpiY4xnn6VPIs(v<|Xp$zFG`#t5K7eSoqJMi@g9&I`|EUbj`2-eZBiHB6RA z8SKx~N#ORQbm#luO_yJMK22V{khbtX>lg}K8R<}rP7r3n@R@fufU!SD**l2AQK+WT ztO5QMxotc=1-OKwHUE{B!|>6L3HHve6H9CBsXR&hu6ux8l!*Yu97`2ifvU|4Y}4WH zM*7?L~E)I9HXt&3$X5`F@KJW4H0rVq2KNS3}_E21INv^G~T1rc9@=e z;zIi7H(yWByzm@h;>C37Y#~h#p5jLG=~15IDk2C!`BN{a53YWk-hS9enX1~v;P zkTWy)Xf{3d>@(?&*T0K_>1VRolaVg-JHPfX)6f6CUnDi+N#y)6efM|&km~k}X_64~ zS=N4RxG(+Tzx$2!`N>o1i@*5uk(TtE|HuDH;ok+;oGDiTzV`Ysh&nNTco=z!HN&2&J4NFgg_gRO)U(c?RRQ0Uj3Z&PQZAUpY04le{C+EB;ZO_sv1OWKL$el;XyvWl$Q|%#b(^#((EQZdh;5|dC z!WqF{KaM>6w?v^?FW}pJ`!1Zy40@$N>yUnKgl5$9c&H-n%j{7FbHtAoz^sLXSmL)klXAI;&_ z!HMaQoK@MA7QoupaO`UP0x8+B zBG2!aAVKLsGVb~ltYmeOTuHg#9W+osiiuCVZh?IaTsYeX#21e0IK8b%K*?#av#T;7=`r=U}Sd> z+dpP;1k43?bhmcL!r*O2wn}tZcy?-fGDwOld6kk$b_yzxV=Z9mX8#6oc(?gHozN0M zHHR%VuY8z@sCcZyTusOdObxmg<`~`G;=O(qu**))Gi_i6ne0UNsyv6pk0gzb<}UB8 zAVV4IOsqFyBZvmIQ&!kI_KyEUE;-S$Znmrv)OuzDpbAtu@BCl;dnJw6D<>sns?EG| z#(0hAB`)HPBPTi!N?^l~j$m0Z%j+bd3jEu15_LhIdp`3BtPsM8{QFEgY}N?H5F6}_ zUvHkXVkJjvX=xDkdh&<*~!)iYlS|6DQXciTs3Q@@C#wM(>;6VLP}+{0)Tk zS-~$B2gjm&AaugLUtYsM_}lfzK2xlzZU_bF>s&t`H)qddNkN`C@{(nA{ROF}wt2Jb zREYesm_FmS-@U&cY*m09$Z?DN>FW?!mF!CW3TOV-rl|2)N8Eqeu!2W zWEtK)!E_vG90Byf2zJ513RA*D^&M;0CnW>7Z5u-rNuiO1qS$Jx{~x~>?pT|aRaR*T45|m=2hI9SqnJ z^D{fU6uvgYew4;w87I%3 zCzXK~Iq)4fKDa{#KJDKqxH;thz5X!@!*)A*<0kVTAR|o`-piSa!9Pfl6bGjYT+90nT4eCyDeHMnC&jElzSB0WDs@S0u zU~a1E(dt6F08=sqaO`3IHCmM}nGQnNJLL2ZWnp|-UyPko!&cX55e4YZlJlwuEO*Ufe81$+&U^kJ;G z(>}mfhD@NbJhw!6s570tcrGoz)0rM!zndOzHPg8lo~L_3Hr;=8HytquM8LO7D#Xl# zuQE^pXBcPWtv}(cP())M|J^_N1B?v}K0u(#{^c?BtM@)81l|Tr8+SiWdv|$OjI2`% zkBATzaXJeaoBjD&q#;A<3+u{x-39n|0|q-OEX49MMQ(}bFjZc-)=IO~W! 
zJsSZCNF4h+I1QuxV3~6wGparH?7)n3-2n#R%&pS>1t&Vod9w9m>xAwVye8N(3|&7{ z>-b3C17QdlwpHFHhG1aqLc03eZSrKNqx*@Dx5GVZvYG&xeZY2?=?$EB*|#ki zZx@{%jMHMCG}QKlEz&P{d5dTg*R8>vTLAa)!2(@CkS7Ws^Q#y`WT;3tg?s~@?-|7z z%fevI-Cd@TWPs;^)83pV=~wGGE-;h-_y6~0Kr?$P(`ii$9WA=iC#KCE z(dEPSgW94!-ctIQ+~|GoQ!rff{W$B#>SNjr%2->_rgJ9qEb-v|xfZtW8X^sB)f6z; zcoZpH1L*_ssS{PS@W}-D0aCSFlz5BhNZZsz^LZ95uB~q@Ky_h69Fd*$K$V+a}K^LqA3; zhN)^!?KA@D_0&K1;O!#XP@`jsMRKM(XcrHV!2;w!Ck~jPWDks?UJ}UvR8k z8oSLWo9|yjUOc-3-7@#3uGQ9hU+P$2>)7MCAK$}sdk?-wEK}l)>2PGYpDHqAYK-ZR zT-?P*2Z1Aazs#?WTvi>%8Erp9d`kOu5bh{1Ka_v)KIIyHg|P%a(d`0VsXzs05~v5r z$MsbSb#z%8v_Y>l&pLfD~|Q*CK0&0hTo zK^`Pj#k_j^++f!7E3QS=SHeeF}5OXy&hXxSdCpFK|dyCq1*)z@p=@M z;|P(lSS*vi^$2q`R+bsa1Dwp+RoHF1t=tHD6&AyAK1dFwWDi0MM)o&&4T2$kUSQJ{ z7$<(5kgqjJVTj_~L|6oOMF5y>it2biMGU!lr8zwL#sRrU0M$I;sBdC2t+64*lp;ag zf|4d4LW>Yri(&{3t|3SPxB0Vvo}k|cl8QH9PbN2V))Y)$r%Y-GqD>K=nF5~^P;;2P zmif)$*}7f4uEh7U=jNsezy$POUl3}pkbgWDF3us(cKOLqVK5kqz`8k^D`T=eLZA{n zK{y5KS*ovldx!Wp_sJT6^Bdnnc{1XawX#6QkcDgH=XaZX$0MmURn`?=BVaZJY>u^x z(5l*K?hPAZ7*Yn&Hah0IuHU(xW^djAsC1_bPrs0+=qv!4k{&&pcR2{-9;CieMp|-E z>g3iPQ8nM7(}$@G0%)t)tH`1~N)3Tj=q2oWjlnwJd+%x*KYb1m*#{#*o~hwAse0L_~R^gSiz!EpW|UMns@8k?A)<=EY{4(oDa zC?93n3(F|OeB7W|5w>rVvAHrNCoS?sVWvGq%Y-loNtgN5%P*x16jeQ(g^?p%d3ti3 zwX4$o;@$MhE1!xG%=f?l7REP|o_qS!X_=m>ODmf^*CsiX3k;N)_4J}3m~MuFvn5oO z6pcOhM3!V4!)dS8YwvwTmw-WlF(9^s%GEXUG*R?BTdV9>Z@N3PlpezTDC3)ep=Y{xu$L0zMO8n^}S${%>Y__ z_%Iz4VmB9gZ)=?vd5h$2k`s>7J)yX9jdRy!z8=?m`>(%4-Y_9(&Zg&T9X*ssu4G`W z(zauPEWWvExLTGEN}sVO^$9Pa%)>v<0hLG&;sv zRGv&x^1Rg!0fn4@L5=!leU+#N#^X5Om7ag~XVaVC`Lp!&Ir^hAdB~v|=F*M=QX33( zkf@RUk}Z)*v?EOpr?ks@2S9NT42_NP7wGxCBiqQo4b#h#?b7nke!K&;50jy+l1IJ0 z1!zLvJ{)53Ltld)u_LS-jsO5U4@=%pB)~R8ww$~2)HH;`Br?WY>Of-2N;ne@(-L5= z9gbr+pgQNM0&!v(-?o(&Uj>lVP@y{toHw;wiDxq>uRUM`uqL}F>%%>(&os2AJse>K zGep{ST%x|g0yKbMr3amJ_6#X5iF6O99ik8SaHMo>j3nvEXVGis*JshASqvs&8zO%g zN|av*vQfk6s-G)3=XFRQ&?}OO9_F4MA5Oji*MP{u*1*f+w3D~G0|9^|4IPD^hb$5N z@*ef`5sdgUX;!7E~=kp%A>X>`9Q-dw?1azhrh0RAM1-#m2 z)Ax2@&g;mD^3PL*9gL1n$OmwoO~uaeUboxOw{TTpl|{55OV%Fqv(lFbQ*7qwU#J~!ZN|puDmu{E%Ph^`T?p+Uh2*RcGHh z_&8SVzbvl6l;|XgIo=a#7CL@JSakxzVHAJx9NbmZ-Nc5oPbFoJm208HYJ0Qf_|g}8 zdYa|MY2dR9T)#vw;D+foH46G!|3T7cjM`gJ*~013DX=S&qu}lF&4&LcnTcoPo^4g8 zOxeYP-Qypm8?ZJ;@{Pxs+G9btx*pFd*3XVG6F6@%1c$X>o8L|o=tdovF~+wSmscYy zV@ku?9QVGqg1!YilD=tOkunxj7+q{0Uq)A%Ge6v6K4L|iEhygy*wXw0OoL%*z0?}) z5`JhHi|ED6n<`^tR@52NP(pCkpmrZn5yBGK1)=5jy z0Pwsh268I_$k8a9*9i=z#`Rd7QdQDu^1>4u?_0U+*vjbYgM1n0kH*vpnu}?X36>ya zq{?R$Vf!qwivlqO&;wJq^%)!At_-BD3!xhGUbYdK2Gxz_P7EIk%t~RWf0#>XK45{* zsB*~cZMe2aE|waC>*-455k7&eIYw?~;=za$kc;g1Ay4axQxO(gpE}>x%Mh&OgR8_ACT}}7`&b|a_%Aw*Hnbj z^pbZ0smGk&3}G${Qw*zQxDVIZR!vzI276_G^8% z>Gl6`9XTkZr(gPXRM7tEm+q$WAY)3W&taTT(&=+F{(t)e&IEbhI8a+u+m1i=d^#Bz zBQ{O@m-I#|qhC*5{%l%=&Dov1MG+i5IEjqZ$ZPGm@?08u;!3*r{qO6!0DOAWsaJm{ z)d__jkP=eCAoLRIzw>)vPEY-fpGl1~<1o70DA2uh`8>?~58p|Ptl1PrecJ%lECVqb zv~XL$^-lC4lC1Vd3KccC@j=J`MduJFf^S0<}dzZs0AEn zfcWOhTDp7f7WZ^1P4aoy-@6m77k~Uy&tTvSX$B|Z{n!60J@?#8>Eh>KB#HpDyi0yC z4$nTq)B+G0HkW;vA_ugCbJB9kA$ow#3=+4#qKKj;mJll1^Z&9vM20DFC{K z&K~64;v~SX83o%X^t`qSV~u_iT&UY5C3KcoVfdRAn3*G5Bj<2emJ|K3wnj$=)^ND|*^Ca(%`8h+qQE%U{LCEN0+;(Z$0a!!jV)T$1 zY>&DL>O38-?WPfGkIZlG?qj{s!`(Up=t^AO;dj2;7xNo*BzH{&!FyLGQKMvgFx=FglG)eZj8{+1el+1{DVYgbOCRlve z9tcAJee_a+^o_;|(GoTnbVVe!H?7^jjng7S%>Hs6ovtFf&irc~KJ|%FV|AKS)f_jk zuCOn-7@*xufzazA?3#dZA8+4Wn;PJ#gGxFl4IRNO%~R2BT7YYNKrz(Pf_?wE07_ts z_DkN94DImfL~K$EW#|IiYxhW&4$ncMvR#Q3c9p;F)tKSH2&gy*A;b!`0}EW>4i?LH zUOR?tP(Up#e#*&O#;?XE}X36Knpg_beH6BQxH01_5oWoSr$ zn~l$6bdETj=4Bmm=#`x#v~@R&W=NeAhtRTU9u7W4#o=MGkjl@JMR9DcO2C7sXQxt$ 
zsVNAg$>{#u#nbbMd}Qu5yk`!uX%7Ng!K<6|33Kbu^vv~0BOJgw9l>PVl|aVF=T-1Z z^qxIF+QCR<)7kT9xG>@NzVTFe>}g)V4@e-V5P=xu^DUg3JawK!Y^bZ1uHU?$7EyM2 z)-unvItXR>d~sNK7(d^fxekn{Oo5-NQn0wE$bOst;&aK+$W$C)L|qF3dk6VEHP+8R z_B-svh-Wxt58ir@P6;0qqVG!Qo_h`l0|0COCXoXbiAqt$Y;~rFLg&YZ>qjulS;Dqm z7>OINeJAq&W%cSD&#;LspKP6)EL!)JA*4?n)ab7RElBv7XEJcB%4eC#rS3uh`z zcLEsU4i0*zOwKgLXY1>-Q%@1zWIZ|`(U%o~g;H8uKOha`GUIh=Np(c!GADg%`th`U zf0;9LluliInqek;3^1W{$w5B-HS;(I!!(IS@Izn?C@`XJI!un)8*VKIa`Fv zyQb5fdymp%&t76pGwHz}d?$VS)$VlZ;uGnOH?QJ+(MF0P6p4-+?J=FmCQ4L>v5A2i zCEDU1u?>+n(X7FeVLUACxqJUXDo;(~oN00^AzA5AOOS$a}0(b8@@EzEz2VIT~=OK#r%&9sF)7hnmhDtq>)2?#2%7i%l4 zc<|)l7IA<8xI4Dz0tB5zTG=gN#j^Wpnt>QA{sX`3p47fi5zQFV{PrZqMFJ=ckFFv1EhR002M$Nklfo7Ih*3M=Q_XNBT`sLw{?z8XdcrJ_m4Np+22;-Hr7WV0acKaAbYawfHi#?;T2izYqc`SGYa7Zv&bz*IKZ3{U)RW&8znMLCDP%29nm*CWgHzLFx*do zbR4cC_j*VH>eBjZ8X(2t$}Vtaap!jyy<@A#=oo}!S;ZcW6Rpi)WACwMwpfF` z!S<8V2U)ZOxE|rXWv2WdF3d$MvQfZ3eKG@U*_p&tTOCj@Z(+2UQH-LT2a(AAnPniZ z(xJ=OXo;a$uJ_|eNh1QzLT{{)$gXhX?KuWhaH@13@um)P;Y9W3`FLPe5T+9x9^2l8 zLk_?)N3MLxv{1@Qo{*~&XZT!yKfFcWMm2g3XpnPJ0^_62mBkFql|G0EcE z9NkckfKMf-C&I%?x6}OG{8u-bJrCufoC>^4gxt($th4Dc#+ZYCg7J6Eu02gn-zfC* zV&x8{Rit8;GYk8M@RCc@l$d~F>4!DlN5~p1oUEhyzItsMEj<+#grX2kF841;i7#%X zT~(}N=idlNBv*640hrsbil1xX%m;Gdgo=zeAk(B+0M1qRuC1(bbZ$^tAha1B3GQS1 z)Fff_^Dulg<1e9PJX_6e=G?Dv$Vp=$Ox}y~>ggGx?Gj8VjrA9(cDEBi2}P-*F{jaR zg^@L1Z_Y5wAD>f0!6-C3BD)A8t`GM^&K(CedMBidYv=-`2eWxd>&L_-`bG1s?@wiuZ8m9w8RpB!&NE^L07Da`7cVC$I6htO8+G00_6i0A)a$zy9Y*^SMEx1}(O@*B#j8 zuPnV8fI-lkM|QsVgIj>`V)}pn)$d~%cm@vf)jxhCz4q-70WyQ>PrmUzSd^{w;oaNl zz--#sY@~njzx@`#n+Z}B7JT_jzYAFC2@iE2@b-IO`bx+{4WK@A`$5Qwd2)L=ksrSK z4$lJv$bPm6g_)-~&UJF!toPVpP{9rD!2u`t3f9>aApmZb=t(!{ zXN%s&=1legSUO2-sG;1Ck?VE9?;&zMJTw6Sz=)r^kgAJw?b9c+6j0 z!$Iz&LivbipIhDlOrd-QBpP+jLJ=1BqwDw58ezlH<}_icwerp@K#~F|6Y3|$NICImptn&ns7KgfQbB>$*ZJJq~9P}2>oD5nS zeJPtR;4**MNRQw*kE{)oyZ-1t>pC*R`Ru0XaW%~^FpZNz6ON@{SSB5=u@ay#2OA>+ z;4=#n9fII9m3dB`WZN5Qs!ZEBRx2NogM~80Q5TX&F&F7WZcw=9qu8NBG+;vrn#De1ZYL3S`lhxZ;n|DNg3FKHlqfJ z*#z(k*ks=hFz`_maxSe%L;}wOVg_sr_4qk5d*j0oVr?AupdRtQTtDrx_p7|| z=2%PFT7RBl54dS$j_aF)!TZ`N1N_^wq&-Kj{0J_{|JqTB0Iq;MQy_sVoOAFLYa?Uo zdF1c}c(zWbQFA@`UT57j3HG_cXZjpRq?y=qPl*-k2sT_BY%`z}T;XsfBX2vLkzm?+t{5eZUUxlv zxvux^Kod?==&c$lp-V&nM@U`l;=QJzX3-_1BmJpmod7tY4#GE*y+=AunW9kFa976& z9o<1xv9UUzHm?&gMb@1#cLEm4u(uA>GVRrLog&!GmT2LRxNi~z>=`=}Ya({yfgnln z^jGF!X$c1zTG~fppBw`&>#zkFpKij0F3>Is>j!{Pjc`{t9s>(k#;}yh%QVkW!FF<) z%2@i?MEIOagw0c_pGc>pY#sHf!1FSG&cmw&P~OD4G#kYSjP`_4zeCo2|6_&JVm)Jkv4gH_PI!cpnH<2|#C&Py=Osg12qhMvq3v z!L$aSUv|QU29qPZ?Sn1o74QlQgQn-ecsK=%{#?4PZt5=W`HzKum_b!;9yq$*&vW-xyXHk}SECh@XjM8Y7)P7B*M=3$L00KT1uDPy+6r=) zAs&=y1_n8DuK?LO4s#I}YIlz2>kIR#iyo2Pu&QIHPLs~UunF?(8tk3b_g$mCX&o8u zgLPaYuiU=NC5&s?sb|QYy)AVJY)(ChQIDy6eC`aL5U%qQKl|WpRKIsB7Zj zgOToDjMNbo()(#@1h59!=wtr(HhIWJSoF>kED)f#4*)*E;a*%`OD}xx3+dL)Yv|Q% z8Yd@so#%hy$CyO7Jf9wrZt-9K{a=8A1CRm^mKPVOiXS0ly%NUaiI<;^LZo*oyc@z9 z=s}4XI=oL>%0uMC!p~c8eVFcYe_8JPqwl?ywg{m*p3zY6D(7ht;CX`4S~R@JYaGct zcl%aqV7PU(jwzUlK9tDs>RloL_a9)~Isba^PzYHJnFVKu^e4t+3}-9LKKF3^TK8|V z4~mPd6aWk5jcQE;D7Y?++WPuR>cF8LMV}miJ0AjAdw17Dubj|TV~n#og<))wk9_C- zcT@LSMr4zREJJwf!w<=sh5aNH>>!J6!7N7kkn{@MT-mzj&W)S&iLH(!p-3CN2hNU6r@1$tL0L7Ku077Uod@O_i`>BZ@ccUPM@CfrBuic8ylDaKn6F-RlcrWaj`mzOPAPcRQ^DV~p-9po2JYyc{N$u%exHi~vqfPq9YS zn-K0UV#qrx=w-s!GF=%`!4CKxn-%&t{x&5LKvG{v7LffxS||z_yD8#PH|t1Ro@D{k z7W&eOXOc&#O>8ten*9&^!d2TvNBlFW!CR$>`kx zGGYk&1pOaV#;`}+a*%0=VuZzuTuUHoy#NmzPLVQn%pM5ZZJA|986672ur`X1!M@2( zqoY`-jBGg1YxI;S;40V?U(08BP37MH=(4XCZPzSV*Wt#w;)iXp%Znr8$!B?zPxxD> z$h1GZW5`C!?&_4u>KifOaPt`gVgImjMtHer1dxa0Z!m|&#Tqib^&*x!9fqZ{R%1itlOyK!x 
zd2cx5-XnYzk%OJ|?(!Sy_FE-JnO{y*M8!5}gPCI^%|W+dT|uyiuIUG2C+&*sq7Kwq zDj<{EOaNHgMIZLzJbM;;vDLN`>vY%zX*+g063kafvn)WMikBpBayY96-g}DndbO3! zw2UpT6OqhrEvEtOXqFa{I%hg^PP2?iIy%%o%YywTy31!aA|2@|+S4FVD}1 zVr-f>f`Hqvbswdj=SI2)2u;Br?O;^b9xjs)L<#^3s%L~LOEWXk+~20$-58!KsRy!! z-7piTGeph=hnyt})E@3o?zg#FA+~;7uOh)H1+U;ZfIg4WJo=z6HMVR-H(4F3`26=TI z0p(tL01h_$K12bihzB31h0WzF7*|5GT#uJTk;~y5-$+|GKf(*c)52h>R88tWLOAst zJ5U_qU15Mdl&)?hQ1kDoVn!&#!{orpatLm*1F<j$p-m90Fsk<=?p|@vl z1Av)vJVr8@#6KirrLb?C|!Q~ z$>>#ihdf%9N*4#EfwJghfXL}F!n8Z9Joj!Iou;R2AI=o}ZdLL?FMUG`&2)YQMS2!R zOqxs`*7-7%&iVnz@0NZ-HE0bP% z=^TAHagqohnpQCm%l=RQ$A6Rl*+2P*gfwT6iZA z-%J-TOw!6|EnR#2k>&Ih;Cr58Vy&YVA&zWS@bmR@}2#ne511|wBs zXvJ@1=z7xgKmBv$((a_Q0Pz3tfBh=$<1VGspZi(zWap4G!pazu450AZYk$cY;<1z` zN56-VQhk0F#qWsW4syR&P-^X5seG9<4Gibn?9H_C;7+j96~OZ*{b6@FKO=nR^}qO| zwDE_(OE{eTRt^-RLnsV6Mo?b+@cZ#OPSm=(LEA(2vzxOpcjHz%zWb2yKf@L7Wzy8+ z=Wv!_6mV{e=%yxvOYBgvMi+|dOOGM<$RNVhmdPA5-|rTYxG zID$>vhgpksD$cNN!ek{j$)D`vwF++;Iobw*A08~Cl$kO~(O-`9efIR3w0fJ-%H&uN z5VAca?V*W+9>Gx?CQZROGdey!grQx995^>I5zI)-p)cBP}0P1 zho%U*YW2&bY12w{}xhcfjw;5Fah}OeZ|g9yF1s6F$R&q!O=}9W{l@ls7-dO|u{{`tS-IG@?G7 zzxSa8PYAQ2?p@d^!A>6g*pxlPp_dVFK;$GM3q_oCTTa@`IQrYOZ@uW86C5mi^2+vC zWME~V0SNKM%)J4I>n7 zn_6PeUhPMd-`ko_SycYaCl-{IwEskTWh*5uWB{}5k;SJ*d5;BRfEjbLw~1(VlM`*- z#lXmryUyBE$a_RfP}Wliql^q-pjacG30cVFxH`gmlTf;C(kvFW3r7K`zzB!{&U8h= zr(jfuH&|-HAkP)vw1|Q|sPh>Go*|Tpd*z1lt>;(!$OlrS1!=;M3J^h2KZ6WH)+y?onXAJWGU<)~`HXiF@tfV!gebCkPYCPxn_j zYp3Rz;zR_RXA}Up2Xb+y!HL$f=rk+)>X*FXUt8WBlDBclraN70nEsXfGdn1BdS3!S zHVIYK1S|BE8=M-aH4YW+Gc)W3af?Eoa{F;qB!dVw_*_b%Zk7b;^o%!AHrTgT`j>0MjsC4;(GrBx1|=>WN%yn(~Q@lJz6R3>yfap?l#_+q;Gt#60&XdxR9A3Y4EIWj&8tJ<69?r){Z%a#jN&nzY4ft` zu%9|p1nvHRp583T((}CUdrzO!XX$;Po?d3h3>E_5PD++ZOIGa2mTk&$qFmTc<-)FV zs*+r!QgNFX5CQ>$SO$Z^Y?!6@eLsEnEx+G$ z05@vT-KWp_zVChCXZb&S`pv)h57R2=QX#Ljv%8g6-h7S75r)I{7x1>?K|0tBWwQHt zg*;%2EbtCR8!x;?<9Pm}a{&ZbMH$=UG=oR6gZz=kO3($qQ6{NTz0P{t@Qq`2&)8QD zaOKFQHgx(MFvJBvX;hC;MN6qF^dhqUD6b2qSWVu=Vdq)Bdc#vYWkuf}Oka%m*w|6nWwD&2+T!0HpxPf$zo`6JDX!*dC*34*?iP zkcxbMViuzV{xQlGom=?4k(n6FOBAL|gv%bB7v{*bMAjYyYV*i_i?Fsf&(bv_NjY@J zA!#RiNlgv2WuU;r^oI_fDGm9Ul!lTPKt$UoyunS@pgvTXS>Vw^(Pa>Zr9n8@!5_#? zLjUJFEEKF_(2Bn(WR~d=0StM*M#b6%)=0!mAU4IGs+cX7 zLTO+GQBb4zi$NiJqbxR7HoABXmkRWLz_aZ#)uKXPaxI}I!bZ=LS95kr&LY-vlh~78l?KHXc<}=G5b_p>xO1W^d ze}r>_$SY^B7g8XBd5LjnG_Or+l6s^T2B%$Q+>3*Kc$_oW!>>lRC%xxq6viTG3)?#6 z(Q0seM(Q_tS4W1M|b?c`o3l>3h=*Va`nX@;o#MdJFD8SfW$u1|>xEQt8h#>0;BjYUeD3rzr{1m#Jb@*UejF0Hl;X|?p7!Y}b;iW-D z@qC+qSZuMD0!F3-B)B;LycVC~IW>xS1nv)!%UDuD(3U?_NXqj_hw*nG6j~2D_qr-& zpM@_A?!CPKlTXA15Hjb@+S%ir)!4lK4CDCCAH0=5e&fwl+1g0Gx$8IxSS#;AdW4*d zngy_7Ti)Y2H8L%>9l{VcvOYIIgV*&u3~(pPW!YEt$KsWF(q@=c+1ujGkXbzU`Lk!V za(sj?IK=k^olwhSDwPAJrZLjH;G7Yn(bjmC&-$4H1%6jfCl zXPnN2n#Fqx3iBpBpB}{uHH4TOqCzj?f6!0qhX^!jR1PXwMDZ3KPwhKI&5>f)@Q*Ncpo0EN1X zA}kSp)v_Og2v4cbR{$wE%~=ZGzfpmy_Aku*V8PW z8M@L0^fQW)QzrH0h_fBX6Idc_a(LEAYwIiN7hbuTrVEU!y);8V+R^mhJNL2lIs1J8 zF73Q-y|e&$?-CBajZuTb2OwUiyF(k{wL<~gM3Gi#6fxCL5e6ua(J^{)0=}0n!ax`S z@4R_CO*+LGOZT5Wrc4IIlYJOw)M68bgY8e`gnpO^^*m>^MC9Wf#kKkR zJL&4Rn<)#hwT(tM|hS_1t`Wc^qZ#(!3c$EhBb$X%H)TL#$3Hhj_7)v zB@C6Wy!>ivE?-Vr(mQ4`+BP4NgG?KM4)ofm~U;F|BryufPV4qv}Z>RI84^r_nU!_0l zJE@*ONPp!wzLC~>y$nrXC-vj_!Q=Gh|L|AR!`J>4LHMzlgK@r#&=E!*Vc{sC!813_ zxj62(0sq*4nuCRu7*vDtI;O`3KHu5pOzEa@>x(ziGKTsa-~1N<27mzu`^?p)v~m8F zy~nUca90lSx+*qVn~JHbVgp2E7`LnHby&*a;SM?16ByliE=XZ2lb2maCbS5xsRZ>X z?Gh2Y{L-zo{V@hsC_pX=5ULP1)|hqb-~pW>hTtz#zT^vmxaVbOnlYv#+J!~MYf-S5 zFr7i88umwFkmpMgYXjh;m*xEQAU#>%NS7$UTcfBlfE&VI1KG4E+oBzxF&u^Br(@D? 
z5SC$tnnE$=GXp>zcWs?UmGzz>hcgsrEnKGH5}GI<0A``X2G3tZDNN&`j4EsZhce8R zhEvi%EJ)Fl8yytfJAifpno)ik{$-)aP4$kscSM{3)yfLxhhWXR1m%5nePHb!?Qw8W#ZkoPe`dl6wJV(~6`ydXSG(;!aH(ub{$V^hw*g#}6x`ea2FlTK6t@ZXO zz+o@+;12U#dby&L1^ZFJs2G7C^za&GG@m_cG5~oxm27M1U?KIWsUe<+^#D%GGHS55 zw}y5}nl#*xAdE{Pl;F12(rH!QB0(5H0lMqkj!K)M0i&mC6az4Je zejJ?d?z0ebt4|lhOE=g@)B4aUL_0Zk-fJ#7K4AFFl!)LFsYg0v4T54lsH!7~DBQA#$F>=t(~ZU1Y%P3IjaJ0dA2U>P?L$BWTv4M3*bi z-M8`HND~1_y5B0~bjAzcX2)~(fP~sXr_NGT2$;|W+18Xq5Cym;6);5JrWUk966)w1c8REQe&>V|rGvltm;W*zT=oG#Ts~Om+$!n%HKqfWJ85(K6vcFr9<=lt zK4DtY*nC>uTn%7e8kwgH3E+i$4i2(r7|p`UBEWr?wpg1xJ0ZXyKYo&)zj=c;Q{?*r zu2yNiRI29E&97Y}m$H|B{Mv^wD5G7^nO#pCp&Qq(q-&$O^xdESyEHg?mj3+Lem#BX zC+~*B_@&?cYPz@bkX+B{^po$snSS}}zet+Kb%5VW`pVZ{#e+6Yn?PvqHiIo_8>10( zo4&)JCqSAs~anXT*uPP z@)XA6Q*sG0OOO>71KI0x7@!~>VtF9+P$HyYm=k;s5VW<fZJ=u7i@)RCui@$xBx#$-s($|0aivUNIHAZxkFxMGmR9d{0G6>5etq~6%e{AG0 zJX2FMGw23GvlJC=&_QN-ft>vL^xoU=L+4bLvkqzccppO+zRzADq=|60`M&wdB?{+q zqm$&0F{FY}bD7s|JC@D>Y4G0;MS2x@W@KR@rXgipv@^sjed+40^b7;hDrh?&nBtMA z(!|{UbHFnU8Nl`k&$PmNXa4}_z$EkyuVU#cYomh=gIx^$TR$NfI@a)nu1%ZKGc2De z=#}6lc-ZHgz!Powx~KA{>&Q9AhH|%q;h_;;2jD2DP5{|uM*EsCYaN5iz-drGX08b+ zq1DwHhMWrNoRFf5qK@o$;Eom40!5t+WdM~%xXtHhkO33aJT#E`JC6@irDRWCT3CI& zl`c@kwzxb?|4t$ZtlKtaPUth|9ahK40>r|X4P>%1x}TGbOhAzrF}wvwDk*`gx+Yo| z5mj>%o4{6(p<34~t*nT_H_)4dD9nrYoCT`N!WPf2L&3aCF39yz*m%Q7aD3Q?t%6Y^ zfD~A^kdw2VjU%LUcx83^IK9n6xtUo&HF~QH__t%mF|v?&D^CIad2I|}HWq!-@9TO( zUb7xnBWrD*E#xHMjS86B;6y+T0zs(WEn9{Wn2j|Hcu&~3HVyy*76P;nB3O?V=T4hE zhp&5t{ms1{5}=+Pk&*zdA*YR+RA`4LV7B{$&HK$+oYt5kDm4PV+FH@n1wH)&Qj42( zY?a8>bvX_@u~^#u)FG7N8RhfIDY{Vb9Of_=d9RAYw2eXbBGO6Nl0o!J4;qYCSQ=35 zxv40_M5**_tHUOsb4UM1q!*%=Cb?Ne@GRD~gN5x$O&cxD7DiuF}(Pvp!Amai3XEfvH;FUo`3ak!#{ENB{EB z+{+!}i?_!T?2fSjsjd{@}_SlieV+>nr(;deB2&0%~%12OSeBM+`&!S)J1Xu9Jt`I1 zU_Fk4CEdsVtee?fqki8-UG6A3HlAH!BOz)Y!b&@HV+s(%Fhq3p^eIZyytV$w%@`vI zcpEfvn=pkrcNPW=fcP^IbrzxSh}05b#*J-fY->Z9m+4Uryb?2)=3xQ=7YwHNc@QgQ zpWL~{D=+Z(3;^M|^lWuI;W}g=Hu`zL2#pTm&>|Z>2|keZPZP)P-;_>8K z!SqEu-*mMY8OAE+{T)2MT|B@xWv}U_V2`S}^vk3VrodLfza25^X##;hhquQH)+QjoS|xW0U~it2 z4c$km7M`Lor@&e2#g<7{*Pa1lC0YPJO2=@NOrW3_pc}F>4aNK0qfu{~N-3;J?&H)u zi`;B^GJAm{1K!f0Eq9X>C_u=t}Q8j*yBuLoOj1y~e7#u1@!EPX3m?gY6DtlIJ zK9K^_rsguFLnAbn>|-enIL0=tP{OuL*RQco7>51X-hGG%pOLu;pwoS6U_GkQ2e2yV zFI_}Y&!ji+tc6iE1YNh-i^eHl6mUjylu@l%>yv=88FH;A3Dehx&eK)ei!}kUv?oAO z71E1%E88b@92mnmX)jU$a!LUp%!D<3^X1QCp>Cn1=F&U6XX)typs{$7P5^+LIwA1g zYT5>XKfJe{KKJECTKmxHq>15@7{7@%H$Wb76G5m4tPQZPQY|}_p<)_vtc8pgck74?(Aj(>#Wa{*ovlAl1jF;6u~2e?t%t z5ZxG@o+B@JmUe&S{0`Ie+Sl>6b}&%7cvMH1<{@#1(l|{jrAy?%j-&J*fAo-ISQIZi ztYQ$2vF$G3&s8Xw?jZ9xC;p}q(@QTeS}9v*KwZ3x+B@r@&4oy;cB>xon^3>C)VyI$Hq zA=FRGNvQ!iqp)+@>4NmPy?l8otvn{@ovz3F?aHzzG6DLl?n*tmfl2;HI$wJ z%s#^6$2x#g4`&{^X1goXy}VG=aNxD~n(81&s2-tHOl>pgSA*j0U>{{j;SGS)R%@oC zsK3}mlrZTvq+s#%?60jh1uNW;e|px8L`I)>$)SNp6aKN z@d7_SqXWYn?TU^nHs%bvZL=PuW$(Z9KD-G{lJ9Egr6%i>zD)Ns(&*^-7~I5b(thZE zu4+px*R8ibfG4}5m(LN*>MNZ`($hE|UJJ-d==E9~@XMy>^t_R>VN_59QCd+as<%Qg z2>W-{?$4>AU)Xe!*Gz1nSy$XdwC8h*Ps&~kw;}AT)PqX z^30%7uF~NiruxMFq1UDa#o4T^Z$R^=6Ve?BU~DTk9e0s?O`U^7k*eHV$DW1OY`)Cd z2if4eLGF{sNoD682MV#7u@oJ7ZKIxIENRE*a^nTQpFZa&x>?c)^s4(8J&V9$Jvb~~ zgKL(M{n^(sYJ!yDy_P9QtV3fYdfjqQuWyfCbCtaY%*AH_iDiEF8vf2XYqr@fLcmz$ z^%8Q`8le($#0Y#FP^r=7t^J?11EZu<>0Oo2?IG^Yp3@DhN7t@Ajxsy06lwQ3LSOJI z6JdP9;9mh4y25Bg@ioG>U8nf!IQiu2E@UH!5K2gmTO_ zhYn!T2`DC2{^zn-x6X*Ku~@_D-3)Gb!e3`t!_i+AS9uR%mf?WRpKzXbL7>BFC`2SQ z(;7tdqWO&81iqU*AMS4mLvUw`N|&EK%m6>^kCu%VyVba3ShMvbLcLU^7dZSP#95(F zkc)}~&L~sdcEEyUq<;c&IaY!kolq;hPNSV1yoPDzniQ zfQsJ9A`15WkZN#&E~;rduh< z_)8W*XdcrbpD_WlzeEkA7BYbq#P}l%mmH&FK;Nb2okOx8CB~?X-4pKYjQ2-blwV@_RpcNN(^!`d@zbzXnBF-T$rs^N%su 
zxHcfI0w7r9`nF6`xqtV6|6POzX%^62ja=m?47V7%Fo(zPI31usX_l6r;r;ERutor4 zJ%G{O@BaYW$Kqx`Gk}JzhbweL8OIw)Lw(L@5U)%fKvzY$k0Wpz6a`S(1YcrglGb1z zm4%TG9>{ROrcmhdw_kT7TH&w>fPpgmmcc{gq{J5Y-hA{ZU07UB0}TA=>RrRzWXr(; z`g>o#dWAx}`E>u@W4u<3BE}o_!|#444P|EnM6{6uW9)^K7^|$iG%=g1g-z&4PghE7 zPggN;2*Wa(wF>_}!~1K<-w^Nk#Le{Vr*{z0c>5_L?9fuDVkZaIH^=aXX(CkD78lYx zq$M<%A~r_%mgg2}JqGC8{cs)6I%|Xir-)q4Ow*&6zT*dYUG*+)uaf7;p2?d-0Qm#1 zalF5WaD@*rI8Js?(>&|btKgS%;sEcNp3N3=#}K{keZ~N}cAq+>HbNP?N0HsRIpHV_ z`Oigah$_HMTNni>Sx}?#YTA>)+5$7*+oem3?{y6PF$~+K-{#2>!+;Lf@R(Z&#d%bq zasFv`w-U&O&YOM@9GTUuDnl=C32Cq zY0UdsoeIi2JvVyH4Bz|D^e2I%GK)=%pTm>Qp6XeSymrqFTFe$E0?61p($S~_f4!!9 z*3Qu{jxaVaT+g)E)8Q~YJ_3K>QDQGDyx&d&Mbi0- z!ZD(i2L}g$^t z?DBGm{CMGWufWC_1MIhSdJb>+{_#2VI=WT|l+G2wXVltoJ)8SShmlWot2HX;XJ=95 zmWB4BFcmy7%HCRtW}1TXzePXtM;mVi$kgaF1w_Cpz$(G719(^4dgi#lUT(W5@G!v_ zedmb!5nTYSs0th0!{Hq;Ig|uZ34&OIct+24|Jryep&462Ho!A11Rd%%`H}O|$cQ2} zuB%aolIBHUCubEN4gKTapf{8ro_uqp#cA*#bY*Kym6|#_7!w+Wi2FYip_Im9C`MZ! z^w+^!}BK`%t!e-^LrHMm!RUms9151r35$Lsv=cj&9X2HxQMk$Lgu`&}EKiDEwm z1$%=C41+?$A$l3}yU%LzV}H1p_u`+g^Gx<7c0a?=u=o((Yq1t{_5HgBbo>t15u<4M z`S~#Z_^#7LIy0T+oK7R>7+S+b;|1AW&QomAWm063LR-^Zq*FSWvlZmi^9Z>C9H!;1 z7_(l*a6uz(4y>`&?o85G4@kn&Yk!?VjaiRDn+sFl$>B zg`sW23_W)Ne9|F)OB5}PsS+VivWbv4)YO9URp{=g%BGHDi6R3CagYb`rnc+SYBO|b zftgjEqZZ_l3ZNo&jNT|jzmL+&nmgpda6zpn^P&uc4?+w*2rMiCgr5M4t&Ia$K)N{! z1ydsQdP;=$0=7-n7|bTZx9_E6DrNR)i-z?d=>%*@5#_H3MXP*(O&t+c@hD)briG_bcVR5U~8?+q*^NSWty4LmJ}Yf8h|#KjP<4ZL?v z{^oEGy77@+dQ{F+KG5b(A@7-+4fY2G1t1XJKIXd{7G%j|1uiPY3{pbJcpY6`2A?Vyj~) z=y|rc@fc%Dp;5M@-|8l7FIc7lncr6foVZ>MBE7+Sph^e^fvU7&sLW@M^bGDJ)2nPc zG*i|i;PWdyEDVs9v{(flyfv82LKq4A0=cZ?%U4oij+SK8%M?rPq@RE5x6{}EgRiDP z_vIG|`M*VR-Z?2QkSt^HjFVf50y#~Vbl$+yGXFgQF3Lr$e1 z{o5a;-}u{q6ED~v&kSJT9PizE7#^Q1ix<*0+SvX0PyRH$^4yiQyjV(iKDY-k!K=yV zXRq8y|L?c|b^69Pem!k(d`uE5u+)|OHyhI66A4BaEl z3$wM*3!nc=^mvsv#%5;I2}Ha=p3{|`zi{;OyyW673cn4?u`e(V*Wji z?k5+P)1|L2rH4QGF*J$cNY{yIwBMlLG?w!Gl}ikcSx=9iJqsQo)tkP?j0zRFz`sot z9f*yy!>Xpby2hOI9)^QOmwEE|Ra)|Vc+c=~W$=CvKtJ>6!;a1AckK&NJvK1dd zq{V(`$i2S#;q{Apn>X}Wlb@-%QKXP31r3-r?Ii8DlfjuB>;5FG;Ew>X2R z4`ZZ4I@TC#X+mdCh%!uVp_2D<w*3po90T#|Mazg@Z%+z#gAs z+)Z!>c2__l1?`+uJ78gB1-cKNjLt3~M@-w8r)A>V!5#)QdyX^2;CGQhOr2M@Jj<>CAr9PX0>P6N+AQ6@kLLs-3XuzX_2Tv)Op8l^g4c)xm9YG-J3TrS zxEKgAKn$yPWM+={bO4RV>wpieD)Y9&3SmKd)U+&P%p?fi=FwgLXgJ4K6ViF)*}yb< ziLJ1;Kd*!#fL9>eW=1QcXedyI<8Ox9XQ}pHn43j7&vCshy(FLEKBq4t_qAZI+Ia+> zAdDgK79rqMD)+Gr@a!Ql5F)xho8;}}0IGT$%vn+)x@LX@ob!);(9;0DK(jnM&+Yjf zaOis|Di*bsz@)^7^ANf^I^0Mwe%vn$OjjI`EmHZG$|@J^EJqLk`{ zVvj3$`v{xVv2Kvh;fXZgcL*=d2*!d`Ri(|x*474Lfx)zR>pI>!eqY-rlxN|d@RD_s zLu!tf#fJ8ItPuW^IkbXV32U}09iguc$bQ(j*X|f~QVS{+j_KXi^B_>}LX>`2Hwl{U z?9t%)qQ5A4g%CzqpfN~82IUS~Wb!@-7+#8qC~*EYKt_OM_DWDZhoTz83wB{-EFTBF^wMW0(hMC79M#%t z?WgsX$5ie=PQUW?7t@Eg?~z*3PPbm3Pv86|EyNaQ(r^A3|9N`j&3Dp|fACs*;S01_ zyRej=+*@V1!aHgH`I~75`g#AYw|VbHhBaW(lC#<)0+E@c8voXm4A9~{Kjo^Iot6xsLZ@r(+9^FoxAKp%%xp9Ny zy%!L~6wPJ7#TjEHqr^>L(LbjJpV3OkGiR5Dh(th7r+7=AK6wz;zIt$t2x%zDDE6$j zb4ZQ6(;<4$Y9Lkt9IMbzmQD$J+61PDq!IkvAG}M~8I-=^Rme|4y1lxIOf>XHUaJZP zc{gAxhjJNNxc2BNV{27-f(Z6R`sSXRizj%@jS@BR-dTic8jAF*(A85}CM|&M0E`#p z3$kzsP+r|(8Xh#QtW<%_02qu;s6Y${D_1SNsKaLhnqz>05rLXYoa+{ecKCNeqM`rp z9DOj64aYpo2>V{6KEX~5*uv!d0)%MQs1nngr)pr$pt&?ht2OJ z!pQkq+K$O2mF7a|h zS0m(rpW926&o>>X4qcuj>@@&fb9k^F6o9U3LpOPheOrDRNwWA%0Kdl+JLy6IX@saj zx(f>eya29{eeSgpG+V}vk+ybB&4p7V{i9I^M+2V$44lqqX0Nk&Rkzs`O_?+k@P6g?5kfJvW$r=E(3dWueBA<7HR-^XNRkzs^&__Jj zqbm;H?GoU>Y7_&S^gTxE^rSkFLa*>>iGJyv7n8f;4~#ktzJa01IL{6(s6vtBRLF82 z4IPwd0M9}8Z0X`?`cMCp-=KcsPWsc=C~Tz|I~Kw{q9cVsk?Y1k|Hre&!|{Md2CPkD zBfG7coMSp+-K8x=2%r)dN!Zf0#5y9ra_w0K3$eVap*8m@j 
zg*33(pm^>s&uSh!1Oxy?7sJceDaaErOgvwfdxP2@qSC(cL%RK5*QF`xH_ z4FIO&uuH5l+Q;&m4lCCv@8+SE3dXx2vSXS7pBlpXWg+G;#*AyWo5$f6&Kx4+_Ug`2 z)P4+KuReW7I%0_wFtP`D?un@jtjiP#Qlh|VS!qir7ogU?vJIYX)5J-3w`#h7o+Glb z;2F68ZW4Q>PH+Z5C$D3yFGoG9D4+!=DN~gXv<*aPb90$Upjc)x6O;4dEy+TdIYOG} zJG5$oAe&g6PQ#dj`6M_uOhIJGt^~-?B5+EQ%w{8atK)n?El^>U7To~gS-@g-Zv}5o z6Cnd0KVpli&q9inBA{aA|-;RH(F)PV_}dk@cA7KuLOW^>&WH%{9VLeiV$K?|x4g%k-b znILUMLL#$@wa&VwYrRfd)8T<&fAl0BmYOs~Ir7XaC%BEObq=!wwb=gEuED4wh=z!n~6Fg&lV{B`tb21 zn)MR}AxHPm-g+nP5aFmYTC_`NgCjh9>sZ0&?A^I@HyuOsH9P^sFe;6GVp5G>XopF( z*BAjzWo-^`rBim=>}vsqbZX@^VfNkK1H74}0n9Q9Yi=&xd;cEF5b%StAL4n>@y2aE zTW5lp!#z&Z()41QzI7w*z4l`~RfV*4^(u|+=hK5HPZ4CK44_0TXwpz67?;Wj&FNWk znmM&DfUgWsRxyT3yBJLHgCmTKD6$qkV=W}BlXlPpbdKSLJ|efe+h%gwO;YAbb!a_9 z7{XsC8wZ5R@dzKS2fqyCEgmtKiwKh=ElY-8kt2FO4T}fC@VZ_V73K6S9Xg=F^66kY z+%f-|@VBA>IdEa&Vme2**s9J?1ZZJDVjv9__f8nwSf!~s!netuk~hHG?AHxQArj^A zfrtQYQS`&UYjBx++ya!G!?UM&9?orb$fpDzZM=&1Dpxk29#a$xz2=dF3Uu>rmAj_R zoS+mMc-w}d;XU${9r$8L8^OKlQ0BQ!kb0sXR1tS6RxERdMzn^IQAXj+Q?uYkS!sGx zi-NRKK4ZS?$oO2ysO?7|AtL}Y$REW_C_V(PUYm$0K|cOZoNa*iyC%y9d=jr@$02o67!;pHJPAIA?pc7obEwCntSZMki*;60sjfwuE8V$T= z;hILg_(Ugx9$+l@02vM}iHT(LIq$PT#eCubytW=dI!E|D=0@C|O9fU045h$t*1eVZ(&sziRnfER4n?fhw z*&>b24zL*ypz;pB!kLF$GR-3ZDr@!;;%{7EgGq07oC(i4fKF8g>4Y)#jk*qFBtu7- zP7TjHv@ETUswX&)04DL>^h<8Rr3KQairCIkZ)SfDT+fBb>_&qnX*U}>i?zYuInKOg z$d<^d?=w|E9@0ZtVC@>QFa`(2Yy9rd;)RI3c-Zi2c!Qy1jXYC>q$BU;>v~D!p3)y* z>U$%1n0e^weve=z4wuKcMqY>ohoR8{es@AIVktnhx+aIAq`YyWq%G)9(aUR3p0Phf z7#aRFvbc~=87)6T6wQ>D^6hs4@8BD*Zx<1#m^xTwkhX=B7=}*F8#da-b7%;MLC>1R zQ)UcE^QMU!G16gR3QQZFs%%msgfY!h!vU(rtm4ypuu6A;O)ardb1Tmg=0heYb0dMx zU;s#g4Y`qS5F79GDF;ueWXddT1m$EH%)`*FEE8FY+V7iZPE$W+;b8Tt8Sk{HAS}gZ zDTtv02*}(rLN+i>6Wd5%CkdS={r#4I~ zjp{~;#_24>JeUZSK$tR}my{O6gD$hf&@sUXimf7KUVGy;056s0c=}YBQtTK4dl;gY zx5{{U^1}dzBJGk8&H`&&5uNQH^z%0Xg=^0?5E}Wk&B$TLpw1BP^`1{CT+-4NfSUK` zy0x+_kSO8`Ii=N+6~2blxiMaFEmD=5=?~BqftjQ3Pm9FU;69kqMaSh_f8X9;A5yybboN2OEH=skHQ&MF0=2 zak$#?{swuYNC)J}!JFw?od?D?uuB6o!CVnkX zavnpG&ty3IqYpmBg+pNwR`BBNZ2I`_M=5v6-~bZ-X9zQHGbF*>u%R}+uZh4ID(trn zva>8kBuZ$H$cQ29a{&j16B?`I0G$F==Jy_dOjm-lbotfKrK`{1NPqah{WDmHbQz9l z{*_nLAYQF*;U3f9c4+*4oQ5x5C;ZMh<~M%|UE%5BjBWF0j&P=Xkd9VYX_QDfnUJ@A zw(UN04p8X950`Au^%NzAcRbsYkLXFvq{h<|It93y(1HLZ!~WvjPxo)%PD2zmmN3Q! 
zsMK#G;BMc$Lwd+nCO48RiQ&EX{9 zWWO^gUICNcNK{TWA{TbEDO1!`##q_b>rU9(pik|DDPv6Av!A!}#Ja;$`ZqjIlM`FG5Urd5fec55PNJzed^LX7X?rQoLPWZ#=tP5bwU>m z<(;2bUg*iwqhV2&0}ci-JeM>Fq zb86P`_$<7KH&H-}74huxItCdDnORMA;Kv4{6hP-^~M3`l^t1)>6K zbL=c?(F3R_CTczSZa6*^Io>2rE_hvW9#gUe5?s%;E7QA_qsr7zL9iax$nE9t$_)Wb zRBZbT|4ilSAwx9;2;zq0rHjF?2k(SpJOQGGPNVQ)7lSRF6r5WQzz=kaw7WXq!ErqC z1GFk@lXtkiz7cH*Sr9G580-LMUwg2UE)}lFs9|}k1N~-a884s_1#tJw3k)a8`RqKG zdM$)A-{QfEq=fPa%bqjk+SaNVWt$_Vq*>ihd7^oya;Y1QVA!WVYDu_&MxSRU2p9ll z>V&fW*tii{w$}nRlqa`T=OPumc>guV;Li%+#1YEsO~I|eLfP!K9Dbobaiq2l{w;8} zsB;L}BQR38JLpKGE5^j4*XPL9tdf3F%%X?+{Qf@C8giv+iwqz53|?QOi1X6mam_|d zIQ=s~qD7#OHV)H2`R#9GXgb8kXXnil#iu{B1_-1NWZ?ll@_yM*Soze(dC~vkA^AOU z0ele5FV<0KyZ6Wmh=A*Jy`U}cgWiRn{1xU>8F60xj85>qF{T%Mqp@HQ{vt*ME{CWi zDU&lFK-FjC?_O7DT^u@R-@~aFO2+4h?qU<&2-oFJ;u(3JeGR%mFKV!<$1E;~1; zgEx4jp1IXNqkE%=mi=v`Lv2|aii^+ZIeX7vJ_%Xuk%FmAJ9RppoFN|JRo=tBy*GBcrhK368P-lW9~tL_wqFiANGr9 zG?iuqy=R2?4EhXXC(eRr<9a&tY`xY6cMd}14eBfq0MJQ4H#h2gcuwh9kQ;oZ@xb#s z`A zKK1gdh%EL{LF!wz@1DZTFPmDMzXdhIkO)yinpOHm+V9e9u&e;H3hg|jF$3)KOm==S zt-|n=<1`)V=Y~2qu!FQ{agTz6jRUc@Og~>#XFczS!Q6B>jRRxB9Q+uqa;h+;71uo& z%31T~g?OlrYOEa-BHh8Fc4J`tE{8lw@3}TbA?6kGI9!Xru`UIR&mICD=9hpB@7p%0 zfRO2MP>$w|if2Hbg#;KW5IS^(uu^$Tyn28=NK*wQ9{1N`j37D+2hls8VgGm56JBdx z=o=fp&MOFtS#tZ%@RYA@V+G3y5S`3Dfw6LNc8c8A38sUgGu-GI;;ZV{(zp5k|FVsLHEy0^#AAio9gjbcT1sz1DCtxnBXG7dt{* z2te1TXTL^XrhSN`dJ^Rt(E(3OMns{hH1&YFmom46?2LDXJAVl9%c5e>Lz(ioYgca~ zSZA={p<#sQWfj(3A^rZ{H`5}*_0^YO0_5xwPNI_mwDc>#`qlJDzyHIuL{%Xn3xsMjy{vK#(^9GPVX%kq5AXzs&*f zf(ncDZH)Vs#VW?#AbG~a4Aa?VZ!J_CpnF7{eTozw zhXe554o0H`5DFO8Ifk)}&Jl9n+6tbjL%%9I)2o_XyG&P&J@VA60CpU3;Ssfi1Wb?e z0U+>1E*fYza4h<}7|y&$BD92$$h)G7zOH^cBzgF?nhJn4Y+*2yli4lqd}Zywi_ z^biI%IS%Dh_8i%&$25wY&}^nO*u4J){@+7s9d7Lb1j#`j#XEg~p*L;*KH=G*3!+x+ z^$Ebl0*A>H#ylWy?xIrJh>zLG>)#?MLrycz)sUXh()bQ&wa#^bFamznko* zduT}V6H&->vJTd(H%*ynI)Jh;Krf%O*XQ=eCeQnc&}J`f9a#XdwizLv#fz-rK8Wlz zmvO592qTN?8^A4IVB+A0$XkHT zLvqu*UW4gy2uJeJX)$;PA91}7bU&mX11#vNH_|x-O;t_O7Ie8k&>2w6g-6klxxyL- zEz*2Cct!`9&}A2oNTp*hr4ws21{kp9mqlI;KC1&p{j9DxA}OsIlb)V>&|1JE=F`UppesWL;R|6rvmuicZlANLqS&qa48c-skw0w+5*cwUY?4(PeHM*^*1L<|V7)LbhXP-6t)L+v$_NpWJ-=I{BoHo;3v4bbo5CCT z;<-7XK4@kCHFiV!3(VOY@7W-d;cy^(!q$|BTvo@Ph7AnX|17#MKr4q(4P?!=eLk=0 z^<4n_7C@V4MiKHFh8?{5X%SqknfrOvq4#5Bd468gfQ(45e-9i54R9Tct#hnFGt()8 z1MGp@C3f-sHM+Lg*3N7DNwJJN-wq2tl96VZhdnZq%-{I14k=~BIq7qGB7eiC`LL8I zc=KF&ZwY5a2RSeH*k0Y}=bhvAFAxZUF1H?k6vt@PHU}m- zC5wP0;#A_99C88YfUPV{v!NT&YUn{dAz-$-fc)R4CcwE1I@H8lu?9(l{{;Si_qUv0 zPX{|_5u0PDw{i3F#|Rz{Zh%VdW+}Wb_5(fH+YhgRJ!2KBwy24CoN!~o_Tr~BcYw|DkcS^R*#++g|GW*Z&)FBg#NLd?Tyz!7%1Fo5M3US zAgBW$ghG&r9Z$@WK+`ri{>pHLE9u4Om#|2` zz#4g8JarZwR`JLk9v-B9dIXy$kfl#-o(=$yK0b&xGZspf04W6~DGg!`>BZpjP=@sV1u;nm}ut>*T3fw_I|3=x|aW0)^;sWA;T5Pd#VS!6Je+fER6ld!Lz(A~vG zt{{7-x53=#D4arY`?^Y@gTZ3{pUh~3c|o+EEkmU;&mMHTe*ZrHI20%6@N=RNUFdd% zit6wG@f+!nzW+M8txg4kKH17*>8JFp%%|V_t$!D};|mKH_&YsQQ6^`kV~o)aph$!w zkH>zSKD#xPw8K`;sJ_>t-JzNP0RZaY=`Q3yKz~~Dk_gRA;;|Z_1aFK*F(~WtjK9$3LUJ6M~ya!g23eT6+E(%8zE-cOO#3^>e&NddLWEvNt<;ZV^v_ z8dM(uVsmvJgQnlCesu!?PvM{qGR)+gA_{f_uz0euL6iZd zyF`eqa+DTniS(@ehX7$ZrT~UdGP`MVc_DRCE{I7019^_{-S*~o(BaX}UV0AWsz

    zVfFJEQHE1I?hd6em8pf%W*D^H%rU9(7NeS*c>MRG9bApVQlmpffT+(gIX8|hfhFmC zTG0yw=qy2Bpw3i=PL84f9Q)ECG~Y$`-MV$1=VmYP=o>=Rh@HA%JF6D$57{eZO{67- zEJL0_H@G!bTcPlzBb8L-mJ+=Y@awT0!+T^1UFBq|n6lM~j=h}q?)s(DH`>RiLTb$jYcCa0;37p10sm$=Fb3GQrgX;!NV9X&01=1*&MQ1}$lDgQE>I2BE)A(z;0tv}5G?m(A-oQSESy zxqrZcpBUJo1{9H56sjpyN>cBz1(d2ILs znd%Lq;)vqrWrfU4`bX^BDwiGp~&AteEo?M;N)v zb2ipD$p_*DhhIn{$`a=aNcZleydUXPt}$M5tzcCx{U%Du;t6}CRUk-(svlBlwLEMr z)h?+3zV0R-plDn~myQ57x{lmSka|qBz^c|Fa2B^+7WC?wxZFk&pW{gyK_F+@V8g+B zZDd%{xa0gr$oDWWNvp|gSg>I;aD||jfpUeX=714g9IiPT{_Y_Q@KiAVEb$P zQQ&GR7~WJ^SZtQ1Le}uTE!LowG>>pP8%7x_z)@=E|5z9_#qf;z;zU~GHHEaw$JQqL zru8qlA$bI*o=LqJTJq6+8{rccFapz|3Qmx-KqEZ33K5>U58woIpD@%Xhhrf_jZD(4g#B8w#gZUt6?m1D=Au>xImRP_dYL?&x(gI@@L80byRSPP=g8dvB|EP&r3ss z@D{@a*qnHO^v3gCAliyx?i1xzz%{CX^laTTm6v-MVLtXx?|Fb-)*0b51fqF_{cxN| z&HV*rc+SUeJkP)d(gJwq21NljN3WjjrJZ~4rhoAF|882GM==2Yj__=1g}aI0{oaq$ zum6Q#2ISKJmK2OKLb%aXnd42xLwAHn`1}yCf%0#$w^h6|HLCds0YyWD6BJ;O^F?9a z8QwSY;|Wm%@Q@^4- zK$aL3^~g0orcZAUP-@446M|%IfaG~?8?abO<9McVh0xUj%NTyK&nF@gPb&ZskdD~m zeCgLpD1Ly1|AUS7l%HQl*zg%{Qh?8Dc(I1@KH}7eHyLbk`AX`L+um)H&v4g>J&)?Iu!(@gHUXHR-p=ghUYza z@&S8zk}h!n@rh!(bYnRk0tm9_dlcN@Ehn$|5k{HCgXSn_;D70K`ttMi8r~*<^d>oy z$LZwmJ%(G*({~ULFGlVNUYFGmK4R#>LRz?fjqvj*JZ?oj6W7?+JW4>1g~eEtld~8q zhOQlyLdpl{COi2rlpA|#?tSI#5CsCD#p^mj~a(V!FStw)n z#K>9HKqwP=kFrvE6nn+9367&M78%KlyvO`RKml5M5Wp_5@R~&k{+xZWoucnMlUCVB zJ2D7Rl$-X5mLBTJX9tn!iFf+sBn6)17zHgnV=G$+(Y~+i+-T0Ks{9gga?LJjThZMB zBeayCg)d0?(SRZa$YB@JkC}6tB3GCq621J2$;&+P} zC(=oWUdW_Zog+WXdV4X5^y=?Z4`B+Tv~WZh6W5RxsE~f)po&2zcj@hQN}TI;+L`Gq z`xsuHL!{3k{GF^2@%B?61x*geZi-19C25 zU2tB(x12pp!0YT6^cLV38u2{!GzR#U$oxC!TNT8S^5!;m~)TXN)?VF3HB6PXJkDGz1tP>^f;c=2XXL zybk-zA%1dQaR~2?C@9YgZsDCQTZfl8#+HnHXB|4!bTqLGdN#SNa3H?rv5&uK76_Y$80y73u}JbT_6pr$}sv}~%C@&&jSQS}^v*66$`5_)&L z0Pk@O)!-#){d}J&8vH#-5BoAs8nXu`DQ-2g?O9ie1cprk@0d1c;io{@+;-RG^G6#! zYLSL;RAuSbp+nX+MoJ{lVX>u;Y?Oc%aui&PwN7AQ*|-4857}xFn=&Y50?#W@&N-jE zYdQnJG_X!(qDh2(ud|K^U^TM&S^Tf1v8gNuc)3W z!E@NuFqQ;#jEBhP%T}mH;TaDsp$HsB=}|2U!<*QzPz2ut!YLeMGptDMlE9pSX+jVp zAoUa6Gp#V_;p!HzOp>*$l6eE}$%R>{k z-o>i}k7y0D*6n*Vil_ZkhEQe}pxmTtT>vH^HX&Q4)eGUK3uw&4(+dPFV?Vq<*W&q1 zs`fqdt`F$pN|vcEx$xxdD%pyN?XdM091N=7d zL?0PuX1}Yn+|mH3AL}U^3qfhMHYkPlGuCvvkgn1ytI9PETh{Rawy?UN+r5uG1yESBBth8&hL?oyLkvSq1KQL$7RDpRth(R+o2qu5AF!9*Q@M z#Yh|J0Oti-mk1*M;JbfBItR}O6PV*^=zG9$H2If3l=NVpungT9rlB+EsGG~?0DwvI zW&hiM_&=s!{#(D6rkEhr0xXz^^xkW4q-)n_(~akTDP6gFCHrzr4qpk%Rmkxw!Y3b~QGCneG%6-;ge`gqPHmodJLw5oij^@P6Wn>5!gNfk$U1 zFQfw$*5TS3Ah-vZJtF$CfpA6nU~xO9+2Zmc43{21AFn^7=xUrX&79A}N1Pu@0Pl%? 
zKr=*Zx(uyYWHQ_dKeth0GoZ=N+P$=Za4W%oN6@c|a{*p?>C&ZC->;?RtJl)UPoJb5 zInRw}KgV$3d0QBukqP_2S6YIV2=AclWO zOyR=#k&~kh5CGp+XYzL)*%&EDc1eJK?SNtk*j!~(!E80=Km!d#`QXzUh4s~y7_MO( zGX=TnK90n7f?*qgU<3xy_U{#eAhL$C9wO{&3n;6j$1Yq-M{6HPH01OYJ{v{(3k>+2 ztslF{S)*JTXs3qaXhJvseUQOA)(K?EInQ9ckFqE844c_p-vsawuEv`@#(l=-CeqUf zYpFTg_r3^31xY#`Dth#wgFbKDEMCJFVTgxUL#dFoc}QL3d-M zj7zq!M+2kGRHG4c*z-d?oY5Gb$uvBj z47SrmtA)-m73UP)SRkcEV?oD*>31di1q+rMIb)=ZOi@^;toAu9 zpdCQr&CT@N2LQh1%phXtnf3Gy;HRA%x9ix-13py3J z#+LwCQUmaXhZZ@*kA2T3?+ftNP5!G|t#u+MXU~_QS|(9&fI#Cq-_f$O<8a zMu}-tKAclZXD%-EBZZu!wEzyE7-oKffZ9Pl%0bgybbeOAb*98P!EI2?!}VJ@r)+_k z88k{wZaLTKpz}4dqSnhja}U7_8vnt=?3Gw7Qk_2C2k{nrX0ES}Cw9m4f$1R@U$jSS zmYW3F0klQlk)BRYK)_-M#Ucz{{5h>!iHQxY1Bi<-uOQ9#CbWJC;A{1*i>#4D=`}m# zsTuk;9IbaE45v4!4v>IgPx1blk7FgS!zo5#PTLLj@Nn6J zCYz=2Fl*GS3|b~P4nPH9h~B1nTomRp#*XXS3Q1!2{-$T>krBvAc>-29-2b#x6wVT` zhr~;<|M7zh@P$vm^P5Df1*4GhWBx;41m786c4RUK!TI2YqGw?N#h#-$&DL*Sa)C_> z*F&zS)m*6>0>*sl5_x`kJU00eLJTv@5a1Z$;(hYO?6-@zn4G*SXS=%gkTb{9Md)Nd z(C~nH@nl+{=-EvJChP2Bk;?jUifRV3$DBEyMnY$XcV{?zjIadX3IcB8X8yB`Wca09YuQLGjlye*W-x z{vg`R70CTQBNeBE2k3om`)5EY3b4qN^>T? z^TGY}Go}}5onCo)GTr|8QM&rdt(cAe;Ng9q@f^c)jC7%?wEX<7v_T5RFgd&DcrQDk zcW1MYqJ*o2sxOjW)4)^G#FOJ3^5ID$CKO)U*V+ykP8`z%Vn0=3))8TDLz7d>oD{YVoD&fW`~9dtZjYt{-1mwKsJ}||K#-$c2h(Wp54C> zT{8X{0eA}UFVan7`5QOV@BH8YBF){phT%5>s}br&W()#W%Ih0x5YMU;BJX3hhMLC%v*EPJnfH;A)vRow|K$pGab|D+v05p}j*A)1h%Wk?$jeCh32Jj^6 z?M85>0hF!s(eS&utHVSga+%36A{Ho8#ZQ5Zz&PNEV=O#Im`)%^@NxqTDK03w<^HCf z4PwZUq=G{LAjNu|@)>#M$kZylt=g5Zm5WC^N9+qkiQbJ`lol;0y^{ck0!ea`Qehg0 z0U+rVdKh#M1XQd|S}?uGXrR4`bI3sHMH&dt7S1a7W@>tdls*72pv;0>LA0Z{frsQ_ zV^rd-u>BtP0_URK$63MOhB;3i8s<|9jNNy=x$3nV`)fqR!6N~%>87CZpx5vmFTNwF zivX7{du0K#`&2~lx1g)DbD~kC)uPyIgHkL3wsn_zGk|TfY-xA{}$2z9mcBv#y9>-`uvMm(>iPZKfd{`bnc}E zwVo00b81=~bm$oY9M|-0y%+n!;`^Z5&jA?iBoL{eq^#K@)l`;9;RAC7F+!(tJ@J)R zCBL^a$c?xz{0N=hMB@3k9-NWC^(;9#9wC zp3rk2U@a|(-M^cbVC{}+J-$zH9gaHXaTtu86@PU3yU53@YBexq8UrILwcb z5gKh8QbvVyCg_{zXxK>5q6fEb9v})cGe902!lDR5FJsv$j27_rNrr}<1({U%@E$@G znZ8ihyXzDrb0|z7(0ie!Cez!ts7%xRJP#bXaDZtS;lK!ivCa5P5JQ=q+f#}J%mdPD zipgaFG>J(eo`DEu{0<6S3uJ0}iPsUJ2$3v+MUQX`3!y@xMoy5-;n|wZJUMeJTO6Wb zeCeeZ((`}uZ>0Uzt@Pl-4^kPyeeLB}0E~PFu&A(};$WvptY60qXjq{}h}?Wv3>FBvG?FT3aSrM*#uAuS=ST?~zFbeJX^=!-d#AE!h3Z*8)vw{w^5{rboV&gc*?#a zh}m-++8=EJjJTgIK?EJPJaU@b!tz20Vg;COZJf?$XjH`(IbYC=p6NCoSe2s&l2)*} zKsA)YRJ#pXsXaAbE zRe%}VXLZS6dvyCA=>in95V}4*>81DHxt}_C%;#x`R>t%D*7yE6)seM^`3iUnpWc1| z&0|!OQc{V8~n1K1uh)|4T1a)}L!bC0G2GhuFI_~tP zHt7#IxaojEFGTiF1!mNs%ec=B&#j;u#v6F>-~q~(2~_M|7kRMt_I8Znp1FD@ifQis z%ip7Q7~t{MF9P^693I|DoA17xCU3r!x=b`f(xkzC!sx4yg1k!$*U|u^BzxnuDm$Vn z{o0zrhBS8b7t#^?-vN-M^{3I#F@5;9!>5~IHQxtD=0T^jX~-Zz>{YWQ|54s+;H++_jjN-EPO`&w&5QxfiZHBnwyVP zmU9(^6%ei^fI3ozzQ?oxL(q0G)-zR#R(Zc^AU%s<=^!yWI7OEUj7FZ#@O~SDLN_`dSfCu}O}$o*|9>B3^t49;iHP$Vz#^yx&Uq1pdbefqooL zU`S7osm8VIFgH>jFn{y>*ct=dGWFYA>3LL0+Z#IzKxkt;>WDBhHH-mcjm6&44!o=* zpv!Qby>x+Y9D1f5TWlrxIG}uTY$|Ph@C34E;n0adx{>F;NXETw)`T2BB5!VHc8WAN zJ%Xldv?-RI44zWfDgW~rhbk#Oop#!>w!ohqFC0Jv-Y^0d!+&^ofvzb4I$)G>M)jOX z=N|AY^u*#oXyHuo9f)Ee180ORMVCnD=CF70`WxDf{+#TU{pL^T8Qeo>gbYRgnC2ib zDsaCnMop&*U(-|7R(K;NjBUJ<8f_=Y#!)8kwK;Pe=XY2~5zlY0$^%Il@oYra0FM^v zaM-*?jxFoV>eUn^5=9-Jqtze#mLt}#-R>`@vUz@7+d$9c^mOM)X?%{el97!YJX>;qDhn#fkfRM6QT zEhnvS2+tYs7ssiCq+2_Pl+nR@(DHcn?BVV}B+pUVE@&R7aOo78sczAe=TMOvy5Dm# zO~HP%dZ3SZ_$hLxO)-)V=P@LVIM}YNgTD65!2``$3%{_pywG14bSF@Ru=t(aRezQU z;x*o5 zi~!sz(Yv|>W>OXz^>(<4pA$gqp)L{`^7rr(bDb*2gV$_9r!6A7);9DFTpq}=(-@&S zoHzwAiacRt++uk7+clg1YC4~}$3T_ zmoYU#;OQ)N`y)zV87)f*04k6Hvz)GtnavT-0`H&+;tWn@6>%+^sItx5=YT5l{u z)RmV2B34-HiL%JgTsvpiH=&g*YZ_g=3?QKb9Rjvs#Ga3yNtbKdEMNI@j^{8h*R%GG 
zy`8k*I7lxAR*(*E>)iX%?b2e%)ls|%ljY9`q$3=oPNqo*)l&3cR6 zS%Cmv*e1D}rNMb};=1X=a|{C@&1QaSD*fbNek;BDmtLVriBK_2K6gf{fw@cY#rmU% z>AB~gPuDM9Nvm{ESiq3`3`J@mzJHsQ7Sk_~-jK%&@%?}G$Asy>P6~|$W9zATK>jga zJLA>L;AIKNMZm0KmbWZK~w@=8px0Bp^!>57t;=c zJ4a6M$n0FRYm31%d~M|X=y$Soo|^r()|q`*$bLa~LX9olMj2(LjYWfc?avj6qzGa_B^ z_8D|;rSKpr95sgboMCu5t|+3Q^}QI^-jUJmABs7`>zl_?XDXK4|2(NW;*vAn2eWFPbahSm5rR%9kf7t;F`Nqdpy0jw0 z17}|P9PRr?0B-gcK0hUef@igtCXd27A_!GD8nY-mpUJ#p^G5al1^`yJ0~AzhdSXnO z5$uLX7<%<6EHG4AD;u-yOGL{AMckj4Lq=+dny)QbXA8MGf73ugJLxftP$yo;QHrP3 zd4niZjRu4gND`UD8gSl+F+L2i6+l->!?38YLF&mlMv*Bjw!Ct5yX}%JWGx}9n(&50 zI_!eu*kS>BI1>Q4M*zYSMyqa9J=_gKzk+*<1CJ;QrktB}E1Uu72eC6m3K4J*WU%RG zrhQo4CNRz5RN057b-3hkFx1f7c15r#OoPCpG*bqAMtjujbyv^0U*=xx;p^)v_8A$; zBS&Fw)U^m`*l!2BcrHeQihyu=-n5k>GR}NRf#JF7FnqoYSg#|yEJ&9&ErOI61j%-i zkVXYIf+zEKow}wc`xH7c1w~!ZU>#VrLDLWbo1`}IHGYQO1f)@X$^Jp?{E3J5e0e?I zD-iVk5*&2T4f$t%2w1`2{S4ph$fn=>o&PU-Er0ahN2H)Y&(NED6Q7s)kR#H5d`Vbf zn*ODE&DZ_M*>G4GJUm|zC$CXx?Sxm;6g`Ik0Gt8L9Jf~!qYu@7yvhv= zW{uAKoJCSBoxCeQ`z(X#1S9f+A$ca#pPYDD)xoOu#rlkTd-5{09Ur3CG<$}xlxw&= zBZ#IVV)(+((rUE5^>-ua+7#*PVn3*Kc+5BroD}A zHp+}XtN|?S9?WUsk928utKqgTmbq?n!Ks3Urw9TzjO5PgQ4~#k6FsR;^DyVt1m+^e z7rt)0mmCX?1L6H5e61VZ--aLp4Dxpm);2snEZ#hzvdMe13J|VoLbg?WHeuJA^w0!% zPKK$U!$ggfI3(gs@fizBMhRyQ&!W%^^|XpmuaSpjdz2{-;1H#3VW)stFKB_V)wv7} zBX^OWP6!(yhs}0CHdfA3^r1JxqBZlJTCA(@>7_FSo}s7|E2~*)NxU-U6 z#muAg#ah`@-VK2vrhOmV8Pl z#1=?+ogkiQG>!MxmT5lADW1O~6WGS4ESw_!0)m}Lg;3XBCAQ#7`FEbBO{~+ah>(>2wi&qX8P4%`ck?8UqGP00Dw3o_lQ008QPdJZznhoEiVZPR-2FvUPilh**R%9+;x{@|Kn`vNnDiyIRYj}^9 zdjj|A=U+(&oJkv|ui}AJ5Oyl3=~PBX7#-u$yEaQP6W4t5@k+Y?;r;MLW&VGj-aI(+ z^t|tT`v4lXn)}?@-PxNrMaxNSl9po8ik!$%C06W8@=vPbR4S?bomAOXNhL~L zu^d($#inH05+y~VC@SX7U6Q-Yy=P`;=fr*AXaIcx=yTljq&0o!t?7%zJC0yg^@rE272ZgDY;)o5`9M!J!1-@O|^c;f0b(Sr}u0nGXRClAS?J(HH_ z=3tyxX=%2YCMzpx>gu)h!Ovb#Wx$c$25g>Q!y{kYSm!Jm+C}lO$HSyfoX~#7!2phU zE|Ftr!NB0z{xtX2ZF*I9rRTl~GhN?Jk6wEX=Y%4=!a!P^dz_}vUm`Vw=!!XHu#pZV zsp0UMIHBs7w$Ws?< zR$&cVt4j>3V7%@Gkuc8dsp%<5?I`Db<=!2D5uFck=mpgdmN?)V&YF*WIdvJvdY|i< zQ?72TBV$K^cG*6w+>Lx0y411C0`S$r?yLIMsGAeKtXS3w)!7&5OBT4h=C+&nTRXJ) zX^b|EbSK#-3PjGZQ!Ec)+1nT=Sy%yT6wC1bHuVzCI;lgXi)3Iu`-cJ5M7+kPhtW}_ zS6Gxqj;O7myhb6<5CWZ2WKCAnP6X;Q2@!L0PdSsOff#|XjhXDUI?z9z{9H&Bp2d6D z?r`hz=jR1hGEyC&5~j_gAJHMBlb%Jbv4*=vI*CR$4?%Lm-?lPxP=n0p31@(t41!LC zd;ka=Mi*oOOecg}^Q4KjkyksAQ~;6(IPndhuLl;tfKyAzLYa%LA}>Wje!WofiuO-$(+0y;K9CPiv`9dIxR#98QU!Y(KFr(=~k|3)CK8)|E8Efi#(>~N$rfTzb<(=M=yE)Y246df2}f>Pdxz=W<1 z&&GQS-GYJjy`;%wC0f{E41_|5vh(x!As-pQ zSzE1q_*(iV|raoM|0wmpiY=rdKKmTbp0T zJ0pAy@XhtCa!62=T;LexCqvMZz40(%Fxq@Ct0L&^)uVEO3GbQT+u|CAO2bQ!2jO)H zj!N2tl*dpTUN#>ui|`1<1Xe0$Ry>44kgGRjOA0}N-WWF%Y(V%U9t4CYay`gj%3~GH zC-OdJOl+-X%+}Ucdl-b88&9}$cO5nu#fH62tB+`F^@vJ#Dhf{wT@qtme&_8}m7T>f zZIK(&rgD3Y1Kw7kSTGk(aL(NTEQBF^d4Rcph0x@G6@iKWSt{zoJ3^sC?#I79Fd8Qh zO8{Y?moGQUYZ+eWQ@9?I!A}Ib{K|6@f9nB7sM^=ECNcSpzh#pPthw*8_p$HE;NhQ= z!`s7w7VI19$`=gZ;89_aY7~1F2T*Vbw(X3(%T7lNe#SlPCEKc~2;i?_JOqz@0MA{D zL0UX(6UU>8cWdsN`EekBZVIoG*Kg9~|Iy-H%3YX9AI(2X(`P4A>FiLtIz5uc=wa$` zf(uOI%5aD?+}rF!QX}~MZ2Xx4;)$-2W>CXX`oXJrF%Bv14VzO;=4D6mENdu4W$p$3dECNC`8 zg$!Y!KD_-n0Q1PeV47XVktA|PK2Q4DPd`HbaKs2t&OBOR?P0@;j_hO%BQ4Qd$nB

    TiJ{AvBP>0MqgCg3RnGMup={GFMlQi{;k?^!$~`ol=ZO2X z)#WK`K4oeEX(rqxP7*he&-VS|$IY%w_sg1r}Uw6F#^srp}Ow?-eKo*V( zG91G?I5`nEWFP%8d-G}ZBH>`{k@hsktdc^OrAT6ahJMqm&m-7G2PRn{Wf7&Ja{*Zm zmU$2-#k3Re+xC&#l2uX%BGQUJm_X-S#J#z(%l#osJcH|0XZ=b#Ea){G9%oJ*bY9BXz?s-e~R4k0O$PNd#86-=uqX%)kkxJN;3{Lb~}d1UG&Ura}kWV4dL zz_crYe-2Fz<71~|6(w>HyW z)m`+h4uqYy+;{h8pQw&9ro9vA*;5w*A@zy2ah(4KT@nBe(UPuC#4kfsV+^}s*B0K$ znnDcntkB`eKhJ~QWpHXNptV1~Hix4d=SP5IbiBhlWjGfFaD;er07u=fH?~8QyfTtF zJWX!9X(Jpmi)Nd2zc2+u8F4U8C)R}4uGo)b9IX-3KO`?NU1F3tQ(VP&1KfKD$CO1! z_W_W3R&|L>+*W2-!MI_2Xj&*hTT^mQQJNZmffqL(?}*LiA8b~^@3oX%#e6+qq9U+$ zN%V;06BKsAjfv5rtodF=ZT_XNzsO+~oCw+g!+6C4x)20zOeQ#(QLZbaA-gA-vX5Vh z1H4P#lU39?yz$YgNk;5GVQ2-OHpj)32edlZL$H_yZJwmcsfysU?WqE*;f#CP#WHz; zh7frn&tt-KF5P_Tg|vaEmqqCaI%R1(h2wO>p+AF7wSrNv812PFBP_hn10CSah2VbC z0Q^iO`}1ez?>zt3JMZGQn)`RryO#sZf!0f5U$__Es|UvKZ3SoUG9SXv*b9WTzyA2xrUHuhg0QYCA1#h47oI?FyJ+muAwkPweGVXv&_F< zXLNc%xoD*9o!LftyZ4S56%u&pd61ubZag`U?V1QcqiBvj32%=zu*w$|&oiChUy6xR6XXK~&AI54O_tT za?p&QtV+dA<*GlAp8mR`H z=^jQ?gING{TS48a*$u-GExHz&j`wgMML?AOGcVK#2ADG>V76OJg>^LV)!aYR4xF@Q z=&=mrgSO*&sbwI<+9bqpRA(PxXhr%2KzH*AY$Rv)jC<~IO-Tgv>J9Z~Iip2b6~Tu< zpvC@}51#9?#~3Jcgh%+dfZK7t-kTwNTUoyMpTCn{|LhlH_(Bf7>rf8+aht1H;9ea% zFvD5NlPm0?m>A8@y88LDGDa0`4#{_usfrM?$Vh)v2k7D0A#wllD_k+uNQSE1iL*THw3P*zMK6@1?w$QC`<0S)bW$mGzCY!r4BMZ(VW&Zl3W z9V#VGsZ*QN<-Ry23|OJl4;ac`!2{?A&yNCFqeftmwM1ocUtUjA0jGlJGo?jvEr5)+ zZmg}~wTaVhKDeovrl@pqBI`J9p6$CIKS&i)pal4(N%szUMZXOZnWOT@=lE>z-`#l?LQ4Ib{fTU>XYmNnnLB$|26R1Y&1IkRW8_XOJFEbe|C9m^U zwy!We;erXM{9_9tf9?mm0bRl%EL#Y7hA@a#$On6S99oSA;T&EU0s!-J28(6~pAP@Q z{}AK@oT=_4Y$}Kw&13jsjfOE44L1+VIKcv^huh$IK84TUUS3IufBqM#N*gmw7(c=b z;+_gH$YNXp`T)SqHSq^}6^153Yy8rik@bc>gHdPaV#CSajpr&O)Q0Z`^D5R59Iu0d zjtNr;sbDS^+iQiu`D=ucdvGTdG}I};mG8X{0EE~Z>(YrF-WEd%z<+rCJU0w)R4@$+^#lTK@|VUg&u~2sCGusXYqA+YbO`|i(le9Y&KjbxcsP716no@*DmRQ$~o^KaH! zh3HbUMtb-D+m>~GK%jYsvh$;W%meP#P7DIg>G|2TMZruJ#Rk+!zw?j2PWbBy+hj{L zD~M6Z-0R=|$@kLl{I9=7TcHOW0(&6J^wy)O1ogs{H1fS~Z=`2F`Eq)&{0IgFu&_9j z9=yjW+xf|Kd2v47xW1VF>W}`A!W?pXU!or^MsFMk;@;curG;moN-wxQW! z2Ma|#3;^q5*mD9;6!Gvhg*<@!3gHZ0yADRhoVFRdE}R1*jPke_Ig(LOSj$DwE=RMClmw4eIgDvTa^&d)th^*Du{AeMpQ{nUK2 zlyaA@07%FO;|!pQLl%Rf0F1X1EaauDDNjmClg;b1_5v>s2m7X8K+G|6Q8a?wme4V_ z-0LB;j}jbNcL%DR%<3Ah;M`TcwejkD$v|pCFw~kh}#@2Gyt(0NJ1YH(@w~_a87&%ATs&^aghY^JaKxzP> zQRDY^v#Im9^({qyCIG3BUu%lUzHK74|H!COf+0ZlH2I%`O~HMawKJ{3^n>f9PYvTx zwE@8+078Me#f44;^*GoI*j#dzyZflEx2;s94RDOh-QTY>DbQ&Fm;=d2GgV!`L@uvTE-m9a8RxNLTVXi=4v z92)=(4HK#dgdOkgLnxuQd=1ax04&*LpW7mIb5Emq5glIUvzo}Bq@#|5A^9lUN0#~_ zKc)e}oC1a=nxf?x_uBzidA2+wDlM^wM5d_(_OUlIP-Ig^SOWpDZT_Nl7;BkDhYA>F z-CL$N;FuPX(bi)ke?-8I>O^##y$M%@Yq3~F!eV2QIch*yhZdj1;>P(6Jjdr1uwk}i z(=Le*m7@*-+Iw(>x)H6T!<}HjEd-2$L3Ew%8rdw~!g)o$+#{pN)ruk@%8@d$(Q*M-0%^eQo{I0dwh8&o)b zU4&2oo`PZ7U>Recn@u&O2Q$KkSmoMdaSMc8t!zBm+XBF%n0ZeRmWWT`bvy^ZbN0B` z1QDi9!puXkIeV}OdD>VR?y<^K!*DTYT5cF%%6feL8^4t{U_sG!6cC`G5x^l7-rYa` zleGWged3`!4DS{2c6dhDj)!Aif-2)PVIsM{p)EH*){|eW#95SJGPztRfzSX5PmB|G zQPl_vENiJU*BH2t{wzL=HM;=UR3EbNCUskkRNdH5Gvp<=NhNSbd4shuKiOH;0wE{p zxu$ItT>gA3J$LgJj45CqL7%^NjSwv@gm6Aim=0wwt@k-hF7qI3nO7F@0>h{vG;xTy zeHC1r2Qi0DFO#i`do{<&Lt$9TWl>1ucPL329rGbo(($bE8xEw(kmn4A#(H{4gX!hD z9W`nL>8QZhZ4Rh?J&#ENF!V7sMXoBwCHlYO7Ot&7<{%Ye?rB}6G4W>) z@1$9Xpe}${xoXso(u>bsOKXHBJ-F_zMOb!-fItQi9Y&ibQwMBebMMX_7!BAk!s#u* zvW&+xfOilEyaT}5LMF{=&7$1uFniG~93_*dSgK1`f(k%+@Y*FB{qF+sw$d|ie3Bv? 
zIz_zkLMo6-bN4$xOPPhq^xCg~Aq`_R|Kk7nz4ZET{5OPhj?>!x`)QhDpbfw?cz{AH z4nOO>cIROlMWI%oFuIlAwkLo-+wNuf{UG-tD_tH0@UV_`96$ShUVG(bTBmIi1*oP= zi&yB8yGbjT!F2PhU!w4@n11wozn6v=u94Dk6Jw8JS$hOnM$WD*q}ll^Y46Enj34VY z1n35ks$4EzzM3+7O~!CPiro4R{XkDv=-z-bRB6~(w}5UKq%BSXAW&o9tn8o08*gyV z#sRmZ)0flnI;=QO_&$nojlAG9CK%;lRj0=;BL{$tJ_-=IWV*B13RY3z23v^XP<97Y z_WYXzj-eq(k`R~?3@gfeq>peskzK(h0Elzb>ga7HJsir#~f0X$>@Ikt)OY+Cdo zvs}lBoU+~yNialht1k0s1*djvF^!{`2aE!|a}1}6-rE2h^>c)&)j!;)?Io?|S1;Lj zb$5M@&jD!7k?w+_)E<^NKN1>6^8M}2Y~ht`p8dLQMOUS8%ITgX00@Z{i;%8dxdMoz zSZxEnj#G;_!1Jpg(c|a=?U2PyO^mV~7~;82)c{xvUi|$C@^1RdIQ`Ry`F^x1<9K28 zM**)ACGs0O1X(~|CO-_jw8WV1dGhjSv4^mno4XiD2rHTP%LO(rXxI~2X5SvUk|!fLD|-OdAAajw>39E^e}e7? zTyg)0hmbLVmuYmw6o7G@yl>BrsbCYdHMBOx2-!4g8-}axSuN;^P7=tPROdbbYmVX{ zKRBjnaqj(}4F4t(q6X*i6d>4>fyBva^QXh!xF!~Q+lH}-?ihpl9LJG8;j<+@5b28) z75<(=@0$B7^Q;Y&$gyW{9^I(js_~u*`|tEYT}98DgHkMv7d!ZR$057F7IL|65!v8c zLRG=Lu9ayfu9wBl7U+e|7AzurroJiPGP6cvM{#UCiGn_Xs-B(&)fYIx8Y0{A(HVh= z!^Pkgjx`Qoq-5aCSa%{G@qUycryQCpV495mer2+afLK7vK@^UKt}!}d(J*Kxy2BvO z!T8#z);3yW>~)ZXoP`?a&ol;mQQJG$sMi@bTZhk%AQ^O^?K>lNL;1w6nd@pUzc$!T zNEw-ZbdvY(q(bGu2uuJ|)>ih~_sD|RxW0oeTnq14{b>Dxj`*7 zI1m+T(9F|iiCA-b2g{11R}aBFW-r*jkoX5mkQ*K4pahG^tDCS5-n3z?yn@5c`DKHB ztCD+!d?hb4C}*2D+yBu9%8nmZGs`&0Ite7rDYP4d!V^G>YjPUGMkqKjOah_xeR@2) zg&r0abQC@D`NAbVD8D=`B_8mc!vul}G@k{?7(#Y>lewiGuF=EL$ecLV*06g6R?@Je z;g&ke6FK1E?{ZIiqJFl386$#WW!Iuwvh86)h`XdV95xw!i9$0cIVaHKV5rsgJj0{n zKI4n)n5*MZ2tJr+1z7V0(HI;d7re^YBDI`Kyc}M8(WOE51f$1ci+^h|5K_HQ!JIzK%#_QY!}TD$P#c{b&RXAiH?{pF8B z;t@4mA}BDVuIFu4=qx#$%HP_f{q)5zzsB%@JLLRWbOK=K_p6L0y*!;yiwlcs182vO zeAc#8e8w_e1)9?uJzDTkeSFHu)MJ#8YdTEweUFhfdgJ3%gj!iD*mlfejK$Va!KNx( zo|o%S-=N3jLuATE;+zle@0{neKV%1xW6Hq%)oW?x-kr$%D{^>k$8*jZXmGEFegtH8 z?#S8A0lDmg;41Xttx*_thQT}oj3~D+e(5vpD;x?oGbjHO4{3GdKLf;^n!uK=>p&m!vxp3Pi0-8 z>VdU7*>s*m9I81OCkL6B3*INas)zG7PPo*86B?;7sH~^m5c0^UXHI6&Q0D+3+crrN zy@4I_s70CpuPwn?mI32r1jPB|A=sO%y z$U{LqsYpy&;~HJSqYRT>b1Y6Xzx@nmOgnL~cR+-SqCD2%h+^D2EyRdQGCgt+JA>h6 zEd?Ozfqqd!f_|Ooo@rVbm0`{8n`&*xIn3fIk%<-r44g74hj_W%?>S(8)FPWAX>mlS zDmYt(+)(=XeOeyPUj;0n)dBv;=(bVL;Tgom=Jp{t68nOkvCD(Ns!ow^4Zw!C62R{q zJDQ`9YaC*23c7uKiZ)_Aqt1)=#eJ!5Q`96o2aqmQ#?$h>`{Ww#(IQdy4`v)4;62QZ zmm;@1^1rb;b|5%`#Bj1%^vG7PYO?@Dl1=6ZI~h$U#?LbB@4lN0Z)(*kpWQ%Kn(ViQ z-d%{27?6U}sUY`uHnMYz;P3=zM8+&j+shjAJBS?a((Ba099~;S)mB`i=nYc?)s3>g zMk%Ssw>{bCB5g^=p~I&vV(9t4aBTZqS5v-Q~^=gr8a1%O5wWo>2p)eVtO3Aps< z=Ei!S)gwW?anBuy4cAx}S;tgD#vECXxt&1}VnE$R&tRjSer@l-(qf|ukV5DDP8)dP zR9MRb4xz55y^nt9=Nv6`n}nXVqUPk{H1T=Sdd0n4uIWg7TUZ<-` ze0Fez+&i5#qmimddl`qqWAE7zE-v2TdvtQNV9>_6|H!TQPJ7O#MN3m7Fy|D;JH^c! zAG=l3Q&qQJpf9sDn(%k$U zww2bH5KxEcfEqf@7L|DkASrVxW`);LfgU71p8+;RkAwekDL#M)(ThO(;>HE9%2NX% zWQfgw7>@IyB9E7sS&65z6vSASD3vcbbQ7W|NeIK|1laWuA+Q1;L7w@ZZgPYML+B9- zz_y!i*rd-NtHYxLmDiSen5c->8{+4D1Yc};$UGpDpBwr%0o_6|D_IQ#+ow>W5=h17 zarpLOBg`i|0jyjF(0DI3jBK9XMXrfJFbo~fZTK++24RRV1jO0&=CHXlW**yjQxD3# zW^YKL0xPoaz7}QWw!70pv42}g>g-Dfr+TGIso!rm}@rw z(%ckzEW93N>uU|Onv?B>yC(N8->}7VZNUPS$-l7?_VWEl>@T^L9N>8htQIa$QRq;? 
zgNJbg1bZKLIB<<*W$dgWBcROaaa(Aol5%Fcp-H`Z!LiQ~d#G{?1;PV+TL`~l8iibC z$N*hHqQ8%VrL>=Ej9TVz54&F;YVT%kE~vH(6sl-VfGS%y5YQGoIAK$$VP+5XZq$#Mm14AM3*Xo#P;yTUUbZh*T`PJj9pE+q0(d@>;{u0W`I8Tr=K`{IXCA zb^J5__IEn8ur~NpFoqVd@C}hDtVn<+u;M%U=FKJjs33=09EE>%8;2)+92e= zvARKj%7e(S)zEF;UnWnQjwOJpW58A9H=@I3zzieGd>Ngw08V1VA}xW3a%A4^+R756 z#ybQb3uV(02AgItWlCaRen6{%MX+4IzLQrBC8*@Ab!lnYWxt~7Ib;cT1IoGy9 zw3CYJ*YoRhY6MNA3IK^vJM_SbRz?FY!g8zZ*?NXd<^kr8`>k7zx5qUKC)dFD$KX`2m zmu#_T)Xrll`y~jm%ZWvH1w+Khw$UQlE#4CHB#><&KSlJ^@rl*;b^^h{a8LPTB+f9t z`zRq}YMb`ih_5LL+6D4-3#OYEmvu7l;Tl zvS*5i%%giQFl};g)(phPa&Lhr_)L!;FA5oxl{G}K9NL$jZM`^ZVg_vmXF*#NX${=F zj+08xza?wT-8JNIF0XypeeM(P)3av!rY*WI(j{y)NEjcYp$WM4I6)c41xT z$AJ^PR-mjKt0U?9yAGzy2vWU&L0%r9s}i+1P)EQX;kFaC3(3U*3J5vY+kq9WJsX`HRATes(~uj}zQv*u`8r**YtukQ$>5O39!DD?D47wXq1$&o<)Tgh`F*W~_%K zNa#WKo*M~!kD9&sa{bwxm_MMw^bcf126?EXTN=ojsb8kj7>#mekBL51(N$GsH2!e@ zoStj3nt9I3nX+S1vSiB;4vgsxK9f`HhG2-t=cv6w7V_M;{k>1fqgLm20OO~mbY^>> z{`&up1IVEx4~l%e62%W$faMSZcDA?6nZ!i1fHCNR8&&XR-wH5BToFcukSRGBJYCS# z!Q}GP3ZLMD9pwMqFgKw<2%0=#>}v>^0NBH&u*g=+*4a?Qc3wvy+UG$Hw|HRB*}yS+;F(|Mpotb6RA%w{7}|)Kx5^@ARHpqj z_QK6nChTl6QaE)b%@C!P;P(0p51DmApYw&1;Tb((Uf=Z-ze#aRM)d%URYlP#m%vl83 z(A*R(uHk_N+6tLZGe|gk4~F9u0~wXG+|V}N9GZrQxi9a*)+6TQs*ud5G;FEBc|F4! zDhnMM4@CSG3JDoty=C;3Dfd}fP_W~Gux~M;4d7z__u|5Adi9rHOt=2-XKC}{a{9vO zpN;D1BH+A&H)j*=YuBHq!kC=gNhU9GknApD8@rgQ2MBxgU?;usiKo-Gd6+itUxr{1 z)>DSkNlBZV65iI&N`z((g~I8jXkE_V7Rk< z#sO{L4!A~x^ovpUXaMi|U}c@T>bq$Q`&J> z2)SdlXm?h>_aN=P_kJ)s6`asBWUWQ+rHa@HRF3nYF`bxS#29jhkV^-z;Kee6ls>X% zbDxk?hf&tq%+mCJ1bt8f#P<&OQ2wOx-~{9io0I>ahpjJRe9AqdN;Mos`WoAlT#!7B zGtr^j1`320Q|Kc=uJ4=XYz`dm(AO505U|ih$K1>wydv)azx{$pkzqm(! zylsw}yw{Q2EtuK?z_k5mqm>r-x3+Ybw44RT`4YW^!BxJ@ftTsaakjlLdjZ?0RNbnB zZeFWv;0W>vqqCD`K&ovb&AlwpicSAVfFVQYzRDh-z%Iui17vUQ;Y#|=-~6v4O6q!# z<9to>ho?!8VBR0%jDPa8pG7Xn|K+nROlknA&HFSmCM{KGzh%PLH#Y+eID%RRQ-?}W zXFl#3bucy7)Km}|PE#YI6&SK)rw3~;WDot`ln_&)OyMX%I2hd&90{V#As@zZuCq7R zM+gel;qokkTt~Uv$5^{_3MlDtUL={cTayqX%OBqb@Kv#A^s?nX<34z;HZ5!$dQrAg zIrUulx2&q4t*x|p+DL-zs7|OgIAQ_U*VidiI#&o9_k$i|1BQ7X?TT_7eYDvd3)IZv zmenLZ_}u zdWy2&LPwicxPy)p#GPPEjUwo3mcpD`jFI-C0JwoEg%NrJMC zZQo(48ZQk7*g-1i*h+kA(n28%XXmMl;FqksC@*X_c7;QUt>F**AJJuGg`2|rV(t8F zoc)OUv%f{s%Ccfj!XYFnz6C|CmwgW6zIyX|7^Le@8|Mqa`iLftmc=SMIPX$jv;J8^m z3KvF&reezg!UTTY4vK+Zd42}}P%sQB`?rN_yn@AL;oJ+ZXXqS>g1H6Mllxa8H_S@H zql0bwHZlba;i=L(CPN{DENq)l#H&}XGcpfjz?^0KSXu?yo9(4X>|2jbGBm8fJCwpN z8>XB8>!R88*pvo)_8QtZzsP4f1L#DU1PRWu1_EJ2STYcHWRM-TuO&On1wGVk5PK3# zJL`vO@bvSCXX4P>t}4<<0OKb5udS{#a+abc*a*9A*jslHWm^Rxl>oYY6t@R!Kue=z z!iSTv+ed)6OS7*5$o3<|Vg&aD`sliy6g^x{>*E0uJat`h&|FMp6;{LJeZkVe{Exy3*Y07{jH?$@6} z0l}s+earsAgXR`u@H?DlYA+@sTj zl+QJCWZ(RE-$_q@?q%}nj?(4Im1MiKm%sF-ROY>RfAX{R>X*J4y)SoY=Vb`+)i*w$ z9;_^3m|zFkyEf}KF>@(xQ;0VI>g%b7@w(D0fMbFZ)> zM>~w5T~2eae=6pj7tjawJFp6bu&=-IS;j@S((%e_v>q$YUP>eE`T6Erc=Gk)E)B|Q z%8${^&^2QSV7B_;e(FA2fw{gEsUIiga@zO150GqE318tbIEiQo#XR-Yi|LJT{G+s$ zxA{E`H7Pwd*RQ_wQ`YPUX>0XSI>0e44^cQpv1?xk1exlR_K>BBuKsX|+(I2o3=ikA zOyAxDXSPb=n#Kt&3gF5ZGj0p}gqi!FiN!!+4zmlu>4(lWM!` zq*DMec>g}5ehnR42xto-U@7_AuuJ57JHj<`RXIOpit*HEvPKpe*-q&cP^Rt^1dkD^ zunJnkY{6TEveh@}0;}+cabVP|>P-7=TIeCLnOVf41&}siv1%004Z+|n-dJG&kdM7R z+SZZk!v8dAXd8OOE`*^%#gbEpo;H+%p3_lQx0_-S%og`5v#9!Wo2rI*6-w|G(6lI6B%YFFkP9c-7EB2Kv>O>;*l*>l^CssCDl-3I!?!3kZh|ga`Z!HoLv7fEiGO8y#-_;3`36;b-hi!()IK`10zkUVQ zAEQPpOZPOJK6>YabmP_6$ai%B346*7SoJM{%J6rN`w&h9I0E$AIpU(e#C%mi3AV?m zoIr2@*(kDp!OpUv<>4s+CrlH1$f=<@z~9+1?cI)d($9YSPWsB%K8phfs9~Rxme8Aa zpa`G`D6`^UR%?O+v&##MskcE|8TZ*Btxxv$7+~YjhCDjUv>8bs*Uvf#L6(sX?P;X6 
zVbcTyg4h$g*+2+5V8Muh;d*tnvS*hab4Hy=cD#>s&-(?Lwx^1zeSCm`!}kk#%@&#mJv1|*D$?$QAa2M8TXkBV2GV(uYd7%k=u*&!v00IKkH=*miHsb)rsgsVA(-K zj3F_k1Mio(LnlJJZ)7A=C$v5MvB)h7K(Twlej;Ojmc{luk+ytOf9q6q0I7B|Fa`*78 zv*9@L8ZH{=scF=lee_(9VV4~7ET(9pgWXIc=sK|4$mn{&wM?;-l@4Xcr+j*H{mzzwZCIixn3?D!hRe4*Cy>sqJ{g8 zloLo)`E#Q&^%GM1z)$w1{zVkWRHhSqyt4-8u1g+BVv0-3*f!fal5xxTc49)_#fa>y z&sie!jmr*8!5i%%Dyz41cO~^noGzhML*P-`1Lo2J*5Zbv8v-l_4{MmPi{RzS^|Rlb zv@`#Y;wle742Iv|}K~jh<<}i08@+SFhq&ToB z{E~l=7Qklb2`{xZDuQf`P0<`kTrB|!l@zQki;Ylo8SWGXXQDMBtYZ$eUacN$h9>tF z*x}Rzv{{sZ0$Bg>{Z#!Ig|xZ1gW}nH|c=WR5`|vG5-zC?C--ma~=cpYbZ`o%-xX%E|oY)JG31Fa*=Qt=V4Y@G97Uf}z zNC#tFVBZAO{sEFxTvJT%-MXEA<=0#Ck=I~Ew&nZXb<@E|~PSBzM0 z>~h_Mq>y`2=s|i=gcKp^Z6nB##L!92n4H95hN)V;81O{9^16glF?vA zqT(+Bj(H>$C_^t;XhE&4WtlKtbTlx$Y<%=8W!l|$53N6oImkR$G!`f)*H0%Q$Mv(s zruQh2DwOxaxyQ*#rBI8+h46_B!{O#2pg(X^u_B?*W~%`(QsEsc4AL7BlP1+tHIhK0u+?XnYSi9UdB`f8Zg^ zE^Ljg1qZtmFq7c>GVEw4AfcVE38=B_@>^Jc<3Kl=$H&E5m9hAy2W9e`21AMcYpNmqpdLO^r} z*<4)?-@qtuqI|b*+yxjCYNR^43ubW_R$%DTc_yn-TmeIU1atiO^kgiSs_oMA5)d%P zex`?p!}0Fh;llaZaC7NFWO59@I=+$3jC&E!C`zf50<{wy!WQ?XQ<=tSduDquPF0T} z@?T|$gXddjr^$Nb7s8|+Qon|bqECoHlS+WBbKZ~XZZLp+)Bt21IAHnHv9Plav)hNF zhRN)PeH!Cj?tFZUusHp-cQydK8d(hlOf`T=#V)Aq?4JrxJoOCgeHb?0zY*Siun`vM zUp$6^?j|318>2ryP7W7}=?4VD3hexow)&k&$ zAQvk{12O>HNEYy!s1<-;9pKHLXh$R!xIgd3@b?JLgHysr$4G6W>&W29FncST2a8MI z>oLz^XB?waj@T{0gm#ibvAKo=h=D}ghHmzD?}SLh-TQQ7nGYK{yjgUH;qUQtGyHuJ zQ;7t#Qx|3;^HhVqD;01SkyDv65E*+@q`rju30fZ6;bPeQVHR-DB0@w>P#Kv|?}rCF z<#}It;jhDUU;SJdr%0?5COWB}C!f)D13~;v+BVHyp66L1LjV#r0GD>H8z8#0yb*R` zOofdyWBmj4Hs(2d32jRb6^Jz158kPC2k0^7(=qd=FJNl0GpJ?3G4{%0L!&ImdyHfU zMC3TLCr6~uaQ#^Dhn+M!AYk%(vi!&Vu0+=Z2Ngs;?z;25j*m{T-+P=(JBeuN1=FTC zoE47aC|YQQ-QZ(5*PJ52ezX+;H4D{DA+Xri$byyrvXf4BGc8a+!Ft5>PWClV1A-DW2YR?4BRuLjohjEra-knboV()L zDx_UOM4cjRGG4A*8O33%sw99Q$0C^YoaEoKxI7^TgNI-vs%kzj!^7BP9hsrVYMqoC+Pa7(;LkC5Ht1@>x;bJY2_ zo33Z&NL_mksuZ0m-3C}X@SqdH{&g<09uAg=dqr>v-BnS%dT364k;QZlySY#-H6O$4 z0EwS}?zynAy^h^x!2ysNuGfjNeEa?axq}=WURmLqHu@F}bMr75c(;J2I64$Vrx2Lq z3P;LS0sO36QvvZg@E)6beSWIi8Ux*hH`U=ku}{P+k`|S@p(xJ3_M)Bdc<) zz*KdzM~3>Y!S2aijI$0vwHRUn}%aEBN5Oa#M08bll%ckm; z>BZW!P{Zt(?7ecM@^`%rp(uP7E%l+;*EiPy=Twga(4PG2lW0e_4#w5u5GzXGC*=R! 
zIRF+TiSOOL74tE_^7TiRbN<0zt-a~VQlJeT8>x9C?` zLrHWKcIx7H6+>l&XR`pO9@c`SAD)x3y$g%1DYAmGP6CW&PjosP8>?}cMb2kZLEd~P zf}184*m-0$Ne77CS6+#r#9|Cv;ncQwkXKw1*obDC;zhvZ*7fU*TxKm;rzGpA9@$;r z4JjD~oqn~U6 zg@xjWoOz50;ST@|&I3QMa5>rcBs#9ZwPFaKb7TZT236o>exxILGZ>0GEIG@^+LpuS z@;Zi;hCC0hdjVE;cPfnuZ=L;+gicwAx%kEZ*O&t!ZD`lAxi?H zIx=5nT`dx_rIN!LlH|Fz%Qz-UfWacpG5SD*W3_#3JqWB)?$y_xYaPg4QgAfPsxMob zS>fM=G2h$T4Bf*-X;@!Lb8~_+I6tz)u{SK>p#Ye)5G*+{6^-opdHzOE#}Fl-+uMzU znK~milB~IrJoEo^07@A>qgF=IGPzOo7T_G2Ebzx3v*s~t)CKd0uuj^9HosFhctZkR zi`Hb?D=0%X_+<)D|;sRH|J>nKM7%hVfcHI1ok$_u(o z2lklp;;OmXDV$G?w@yM26aJd~u0kq<>25aM_m3l&YXIcc&F%2|t8axbKm7^pDf$up zX)7!XR?WTdCxt*erySHcn>83#hlOmfY{XvJP3%n)CrU@ajtBj`$53!*C!J4tMhl1> zPd_#>ie3if0c%85agP5bO z&9>lEJsCR=@fu~>0!4wZQ^yV%v!3VvWMCyz;&3Jaw_rqqC=P)_K3eD^1cpyNv;DE} zvn%U0xi1}vm~V_+t8ZhP48Io-_#B==$tcMY19O36o-yyu!6a)( z(uTO{>Ly~&4Vk)O`kS(1-b@Q8);&VKo&Dh)M*U~R+wa*s>$7S+ulpg}k0A0|WWoG- z9N7|fpo?>%xY@5*`(G!&);(<^U!Jur*d<~M$XLH@TjzLhRf(+%1;|~=9$rhtt0JUE zKF|$L(qpl~K14D{htTO_46tRIo6`81ciKiFsN*eqY158N5Y~zw11Ov@0I%Rm7)(X4ew#`3MvVOoU;%&*l%3SMD!Rj6dWuI z)&#~X^XT!hx_UW!$A-4dU30RK?1sfkK3trv#_ws=8$H@ngvb^hWfGPP7~>>yeOXsM z!v>R0^jy-W8sMecYKPl{={Y73bSD&eZ5p^1pR3@E?j^S59s4z^F2)YT4C?BPI?eL>l_>PKN50F-QTC~Z# z=bxLHrk|*7geZ<8SMkMv|6;iC@Kb>IIh2uY#bBhlmjqx3^AbM%;6eEK_C}b%<8BC& zS>w^Esjv?lmM`Udkm$4TLA2R1-sZTFN`n!zs{V(6@JHmuvCj{|NUd&!lZ|&{>zKKV7sCG12B{N;utmB+hUeR* z-Ihi<<K%FO7?}bX&|LsU>JSV9M_~YF*`Wg6wZTxKa49=IMf!og5;3qSDzrWwPhj%<1d#18 z51VqpJFCb|jx{Op98L5@&j>|-DC^t*T#P?REo&oi??QiAKnbEIoW~jCp3jt+3f+Ut&v{LxB++S2&SsNnK+_rn zbY%#X{ANNUtHD?ngj!2k%O ztCyneEZ)2p2FX*@c}fA)9Mm#RWTn-kjt)e|hye1R1zHx`si^EgU?I^kgIFxIjp1rQ zRt;9Qjy#!4*AgfbQFSVnBu!+Ok*D%bIsENk|6TaXXFmfoO{g2dW$z)~t3`a<0>X6{`M zJ!~RCfIRln0$uwX_mEx|Yk1IE+GRdPY0Ak$8a;%MT@J0naQo&I>#8Jqjqm}(bWTFe_DuW!^Tf0u3hRnK= ziy{t{>ZHUN`sm~;tL%Flec;c}B7~;-0se7zO~KGHHmYKqEkpMm*vunzoX()cVhz38 z3Fx)(*2syBwNZ0r+6bIoWsFklD8)%=6iTK5ygGgs7@iP?vipm;%6{T@W-*1vp1LuG z0U4DH6!$pwt)XqNLii2y*`g5EI_9_{_gsK|WFh^-1I7DyVcGvPJOC>U2+F%YM3(C4 zAc6h??*)C4W;KD`!eK{u^^HsjgA|3BW^+~>WID-Mlr`71D@9ENdnmY%-87O!sL;JS zMb4Cvur+l=XG_LD2dFZV5yNg^8m5-@gt57SIB=rj=HS7#QJx9X81z=`a^UcXq^t;w znb1&ByyoJ^M&5jY4e0p+iXEWKLkmg`>-}0mO06sGf;Ry*XNl_|xco6Kz@B>+gZLW5 z{64fkrvG^@XK;G}^5FTUI&mT<@_1Fq4J>C297|V`6-wVG(VU z-c6eYGr`_mQU7kivTfg53`gX3RjV81CJs{^#Y7`mfo_Ygd?p4PT;;a3Cq9x_ODz-nZi z9NWs?nPKEJdI5XAOu*Wv5;oU^;lgd`PF2GA6x|GHlE1!84)eq`y>2Vv?%TJ+BolC| zD~n;UOwkVaGSENDJ;JaRmPrY50vCP%01lWe{-BVUkR(HCE_R8NmIXkJpyVAZxeb6r z$ino+^Mo8ZSR6{Lrzc?0%P1OWmHU;$So8t1ig@-b>ll0F@$&g;3>=mAD7qp*Ez5c| z*fUw8ZgLo1iwD>4gwKEdi*$=vV9Lxu7|jB#Fq%WzM;Vd(H{toe{|_m;$%OsY<**Jj z`_z}d8s4LbZHkF=Lz7cs1>ky~_x|0sO+_kKSdQ=n4?=&il?ez@@HV=#=P;l}%K zg@>k}2zTFjGxTxof!W!xz(9=z4w4G8Z-^eQFv=rOJQ=zu@YwtKI?nuYxb(%ZhAJWc z;?{$(^ZL(-GMt7>U;Z}J#_+iJcEj?AAB6c?z{(gYBRpGuZHdWr^hSN;Dn_5N#~)q` zJ)2S-%1RyMoTiQvB7)nJn4*kn?Q# zd4r;m3bH?c>5)*lw-oxureOtWpx!)*ZEmtSU)|LqQdmeQ;rd(1JHp2vQjEiW%VdgA zb)aQ@(&`czYsaA*Qbp4t>vn12+yS&yo;sK+@itu{?0c;P8hetf@Qr?-4&upttPBDV656j9xDHx^#L`(fX{_d=^!yJ z=1BqqEY2$yNTnbulBVBlGHD30O)uOH&K^uY05ETg2+p5hj={D)*FO36739=*QAGg$ z$<9g4iO%8(+XuJK`M3A>jz$o@IEv9e@GPU_jNF-iVE&|IW+h3iyO7zFRWqMc_DKh> z!CIOFUnS?TY}NY0U6B>Wicmc|Fey0z@IwQWBW}?WOrgbxM3s7ATKCDXU*%bB7C$m_ z=1giJ1rGri_kSAB3yc$`(Hms_wb{Fyr`7dt<&|8ejFYoYTXKs0h_YJg|c!BU2}V*w@h-N7l=Wk_l{ zN$+TnR1<)UPNPnXMeYKnIJ||OsuD>B8UZ?4Bi6=tSFRa>M%D?3NGD30j_yTeXg#B2 zpq=&{bz%3!1kz<<38NvANa4PfP3^A$SbJ+er_8c4qB9u#Ttkp--hU7YwGMDi5xZkM zwb*TlPxHMFfJo*5mH=c^1ns5Y#_pVQwvD9LS2jZ#N5!*|CynF)U|8j>^doy??195n zGANNASjiNrf&F=$5h8~@$c4GQf-W72c(2^2xXCB&!4b14x(CunjCFEI8UXj*#X{IG 
z_!!c|6h9qC_se{Ib6I`PMC=m6y|UgZQ+lxD7HU^Lvz)WjQ&L9x90{`&XS+xpLxox# zyE4T7;;`x6zj7FX#OwpLOz6KlDTz+&>(thR=}=k76k>;6C)bQ?11Q8W?73+jd?;9n zx`z0B_7rEqbO`m5n1uro(^J@N98>BDXx}4#F{hrcuMmfJg%HrB_DJnXN6~bqZHCJY%L&UWPAS{~@Z(>;N2+R{pMI<2Z;j|>qy z3bY^Q?~o9;MJQS>b4}0;F|z51;K zH7Y#oiN=;|+6HEAZ6#y~*L8H!6r0BTw-{V7Na(#v^K%Cn^xI~L^~)OW$H6&Sb;25j z0#GLAAetAa$6z(2pDn~WbHGpm3}ixDjma=QGZj`>NTc9)RrXN%83o*>V99p? zoEBW!&Zh_GAceza?%=@^tSVuztIvHdy!Xz##2_+QUAwX}AeC0iQ z(iT&|uQ-A}r z(?k|XREQ}*2r0A+>=g#5TwJFOANlYsL@+95px^YPUhRkrJSktwYRn9ElK8B!O(J3L^H!_3l3%qHi#pj)(#Cq~f!6HnHtwBy;h>^zx zA<7PQ4?o9iT6FZ-Yk;7a15}n0J!X}Zc8b_icWmxV96jIm#GoTpAI{V4AJA4Ry_k0}%)Tn|)lTF3xiuiV*os`?_1`TLkPM;!rzgG=+_s zA)>Vc7@$8xsNl#BvUb%ZDHn8`Ie+;QvcqU_*ry_nnv8BgEL8u@TsT@;4#%YHXxnnK znXIGJ{T%6QDx7Pb1K9C+!_SKB zVOq8md9fQu9!qbE$N>({*cgL|1k{{k3%vZjEN&ha&&Y<%uaQ>SK07u@#K>|QB{gj- zrqW@yPFZJ%*Bk==73~HaDxqZmb+#xHB<5&}f3#aNhDJQa2xon5DdroYqlrzXgfnD*G+#)VOXIAUagS^IqeuX}gYh4=I4}2oG+Oh6C0~f+Ikt zF>Oos85D{h_kgojE;shxwX3s7EDnFrz1(*+-oAk6ZxYq8jw*|8HvLpqzm4tf6yu5hYIq7kIv1GK_3ryLE=PIbWz>&(CV?)(uX2l4gAE2J@_p16 zXP}4G_JDa{IDLE!>*As0+2T&HrrbXN=8b@^rq{yApnan8^F2sXJd_qa2;^m;nh2oz zkY|cDAJ(?_04P*n*C|+Plo&G%E5c`VkgH@`!4P2?RP`AngCKj*AHDQ=cs!_03y z!SeuE!16M?dw61|Fz^Y4)+j<_vz>CZOV`J-cs0T9-#bD-hez0f#LNL+AsCu)u!o>+2r7#z)Z<_ z_hDd<0S1mcK6i0GG#2iVx7iM}glgx{&(YTqqsIMpvj>CRlS3}JRp(;h`H>UJVQH39 z;Q=jS5W-sj+&m()EoL3e~{x(aNBEG+6U=@7AK61lL2mZ=Q_ z3Rw=>#SxxCWo94fOP_r_Ec5#ga?rcD*NYEd4r_OA1IEc=;xnC!=QOT0+Jkk@PR0D} z(ZR8Bu(cPuhgc7oz9Sf1BsCnat`fGT89OrAB88^|HZ=z;(Mb-~>Ds-RsxypXKf&{E zE?mQ@$4DN)l2Itv%}|oo1|ed0zJx;U!+FT_{{4^MMsQ%;VC?fL9KE&0IErONFvyg{BnVdU#qW5oI6oS_WUI3bI@C@Q@q^gMCPuNy z=MIbzl1C3z*Xd4yFd6!{jiPcTmn&=NSH^p0%>+<@s_Rca&9LK(st|+P&N2wc`G0e#-0b*D~&Y=d==$4^pb$ryF z2$#%_hRjf`!%b4GhwZ>*jyl@N4k;O@MBJ?2SGN~&Xtdte1@sOBzUdfZPOc1~#V7vK zNK2cwJK^4)>S@8$06DGNfed^fU&mnr-zS%oCfhFY8>1Ga4Dxw>Y89-oO zWS-pX7Qms1GrjP^O@O6p+16g%CvxJ1!XZ*|PL1xYufj+HNZH?BSTu{H-TzZ;q6YX3 z^y}QwErNVbQwL5CD{(-C#En(%b}r~Z-b$R^UUC)fh;&2&50(PMz6ZeDm|@RPyr?=o~c*!-F|;+gU4gq$8#~Fz7=Jp|Sm#J(5lTvqh-&CJt#) z-|g{JX(Dx^7Qr*HllvpJgy&P2#CjmEr#>_PRDEPom5d}m=1}3-;tWJwfxO2vkDSGb zvbLEwx_8>rCM1d2BIdmFw;ui*H0@YxOYoo85aH=R*Tl2YfR%S$@>kUypr z)%e?JpSE$ENT2q@KQR?V2c98F-RQ9bta}5yXH;=;m|}Be%1#zegp&<7#f;M(0qFqy zWns8y(!x}4Sm5lLwP4Hs`3_h(<=pfG0F_7jibf*OB5gsU&RGXUk#^s-jyz|_$cnl^ zKyL21If^1d^m%a4sX zfU!&;Bv3^P2C|7voSJXMJ{&;;q;Rz3)JV>){rq)gEvh!E?f80uxZO@7x$3&JFKiCj z6x{`JW4x!LF62zhKA+&YN@&K^2KG@WL!uAoiAhtm@`ArdLbxGrPDawADvJ~fJb+yZ zWDByEMesvQ(O*iL9OeAq+*%$=JF_fbmirk*4Mn&TP9$OFTJvfrj) znj7R<**Gl|_s#CGhX@X!-6>tVs-%Q=xl28y0r~D}+D_jJY*>KvexK z(H}-8!_5^4Id;}4Dnt=%uTm|{A*fJbW|(z&b_{2%BV1ozrxxg37@M3A-~H~Z;qC`Z zVQ}&xo{6=mI$DOgo3sd71*^9cFuZ#BZPsL{pFB|v(=n+B=Ava;OU`%nmg^{S*FTH# zI>2j|U5{gqS%>{S3abFE%2pM|yA2B?U~&BgN+}&7&We@f?&U7w*)|5CCr!wQl#tXR zIaq-0G|lbrzVUu^(DuqJAYb` z)f{-0np31~(_|mpox3+;ySZWHybEJf+$2X>AP6|=Wo?zv!|S(j5>Y6G_MGH%1e;!9 zon&K22*V`^_ig_4bCtqjr9F9EqxrI64GZl_$s;RrIL4Gd>LKvsiq3nwUQ7&IEu!x9+) zZa|U0!Z$TT@S!~Fj94h=mj%o+L={+N!Jv%2{}ynyDu62(=_V;8PExW}5-N&li)X)^ zH3T>T&~+rToSk7UsIQH2yifn zVLez|MQ3f%$NE+{e`zixkv;Q)x3-TUI$(>qPnn}UolJ5>54vz>Dx3$4O3fKIC8@wO zRnd)UJ|~ULX>{Aj`ozpIVqlMLTBrex6(}A2EVjg)-#pJ}yFmL**O{7lfI;>d%mD>G z;c!>O<*N_H;>%i*c2(Gz2E{y;{bG3G`#%a#J@aH3Kq(b*n9Ri-9vnjNvv)kV3}(BE zp$ACV&TnI9mFNy_Bq`d{c{6Q5eJfzDqXz}R`Ek~V=XJ_jfyjx#v7Z7WYgamXPMrwf zYkEslnFSM=D``Qxb_l`Ycl;h%vLx^z>1^72Udn#j^PLbn>*u6F z+1q|FMZZodxFhD2@|he2o`a4K=qcZCu04k^VhMA2bwo_tQ#V@ABN*1s8wrm2!0KM? 
zzhAKs3|obGa4qkX_2zo&b8ee0LUwr8*xnR9a@Ln0r;2+xYc~ckV!hhv1#yXhK02TL z9@W4Z#YhM=(e2K~kSqH^TcgpyIjoX;sZ*IlkC{es%zLaiF?}bFNJrmf=sx5b%^C6FnFoj<>fqQD z-vZP4esqs)yywf<)DhFMHytGE7w*mcT|12F=(sfw=P>%rPO~q3EPG+Z*`YYDeUtBL z&&RKLzC4~Hs4?ax9xZRgO3ECE%obnv!|Nhfmk}8exjSvij}UnqR)}Ns*gTatVt@e1 z90Dr23&RaAx_@HM39GGF?#)}3DR2->=+)?1T6rv((_2*-5s?^zk-E1@c=V`H3<{Gh zvb!9;T{n@R+ne&&T?4!~*#aITDrLc?9)S5o0&;WC#NdKPCop+X%Ls~z>NY~J!jRA! zJfw!E!U^G|qkRfx*wmy<+45#c&0Ya4v14wy(kA$2(`5S$&%{&5hE>>jv;-f-g0PnS z1LhH}9RKk1C=}*Q#jQZNxhZbiJSc_vthu(v7XdTf!eT+!S&EUjM|PQYZO|)ke-Bm+ zQ2N+ZUs&5*C!f`DI;;=tXM34$*4GeZg#)-r#d?MwwG)#w6e1cikwRJa%Bx5eQy;JF*Ja|=2<*EyBn}?7`ELma#~>;Y-(;%q2Y5`D+^ADr9ik_9NgvekoQs%~?U(qof(i&+AFzmc24WMy?%?xP@ z0~q_>o@r7VTH*ir-@X&xco~+4{)WFf`$$-V4X(Fg0_X%32Y|3AH4b4zC5?JkGcmcl>zZ~nXRmw)oF=&FG%6&U0DwP!+$ z);e$h$-fO(pL!IJ#lNHr{(1#_vKHx;T$$Qwa9@YlL`r!=>N(y-)z`7~b6c@NF2l znJ{_vk#ImNO8#)4_q`vU`2By%zbU%8dk-P)2$#O{HEcZWFD%yD+aHC`{iELxhZw5e zd$&WDpKZdlUH<&jtlvi11E5s6j%}$tU1gl8dvCoS>ZBXQ+&XgSrY0uCgP;9493s0; zyUL(c>m>@na6Em@oi|=56@#41Zn}$LFzu%;!0E*pr7>vHA!Cmymtp@jmr=%c9k@IfR8oKK#(I8o zB3xS{0)lR;!1ksw0F&pYc@EN`_gbbkf*S{feoQ$C3`bUk*r0w z$E5aj;{fc@<))7s0`;^V8Pq2|{3(!oX=-;D`OHT!d2RFuS`z>#+gQh0&ETvLGK66P z{e}$&lq9e%P12>_egArRm3oZlpMM&*nCKQQ7P?b|p-p6?0zp&dH9miyRDU0AfOew(dpub`9`Ro?w(sSc<(=O8ikc&FbQV~bR9A5j^wgBmo5$AQ* zkrX@WEzQrge>z{FwWvFF(&8{3?{o+!yXl4N`cb=C+En|=H9psWLYOf(vbJ#RGO9%pMg~~=iIz+2|k@D({yC& zP4A4Is&LFZdFI$P107TaZy7x}D@N>%k%2lgV6?f;wS1k}$rJ|(Hp#pp_RyAb4((!* ziMEgq$^JJ1(^=ApT$3_qUhFeUG*Jv^nv1Nl5qm%HfHtNM!fByX>`$&$`$(DeG}&{VKBHMWL;~chXWo7Gl<{}$dc)e9Ze_3c z4CjqbV_c0k4@H1t9*<9?kpS?B@>|5tlYDRME}0JSC%2%$z?8B1v0on0%Q@3sX(j+M z06~F?xw|ZyAk{@@g%BW~i9dF2K*w?T0UvBo1bzHUB`W3ae0zMxUwx^#^~x|DC`Tur z2v{;Hlr)CHd?Q&#k$mjutARj~n+xV#ZG}iL(@m%{JeU@KA$;Y3;;z@ZncmtZc> zsmz7pO7d~GZhjn2M)61qm)XiE0}Z(YOV`E|Z8cVDLeGKo{&>BusSI(LV!&~LO*vBp z{yuh9;6cpq*1doGio{u5c4BuB-Tp1iex?tC}QutaZC6%)yCS5oSUn~rQYJ!=Q zNj~$q_58c=nmStD;rg|^;ge5)38$bMnbzJBLf#g9vA@2)&Vlb6U{VW)wilqI>)L~| zYU6p0k6#2x;*}Eq93GryYn)MifTN@QXmgi9@oC77JP^p(A5n&%hwn z>Im}~#b>CXuvu6Mqkz67ayB@4A-wRHuZExh{G%8uNo8S!P=>dz-wcmEdNsWD0Yz(5 z^C)G22S3kJex9M_So8%65AFf%G>$qBj?r%MM~5Pv!(gy(PQx(=Eu*35o)gH6UZVY3 zOS=@F!Bq8-SL}Gi*bPAM7CAQs!NRZAB^VcE;t2p;9;LH(Zvi031geeTy#_p9Jq0^7RE1J35nxW*g7rI z$M!yTKqyG^{;C-3E-6xgmFD4Dge#l83|Z|G(~TQpX{ycG zyJQy*Oo7Q?tdr-uh2v7wsXN{ySBo>rna!M=LlFTgP$UiHygf1i^JGgO7)<_ct26VI z3piXYgkFYX2PW@88KGFOTc(Nd+7Yb2MT!S>k=WtyV}5^(^H2lS0YNdg6b|le(^Gbt z!l83v?hDV;6OvFhW1i z{QB3TW09wLs5z& z(*f3y;2Q!mVC!J9%c~2>Fw6ltmY&->zvoSyQO-ApG7Gca4b$Dn+CeRZ9P-uN*(7Cy z+&V%vnGD8@aJxDd_yIUYPI-T_!Jfe6^a3(tJ|YfYH+#~llgYVhz?PksF$LrXZyr#f z1bdXGKux_2#*dvVVpkB1gPjjF_O@6`@XH?V%>gte`nNj#VVG_seWVSkm&)v!V{f|v zQ&yH6KFm=*@4yz0BClYN0TdPk8Es0TSL3ukLedfjP4d$n0V*ied9~khY`l-oIlNECFZ8b!qxdXz+5d%_78`5F^n}MB3$>0$1c$JZa=m~ z{1j)~amMew@h)I486JK7VbYfp6mL>nL3E%O_Rx+S^XFzMS{)>c@)ncsQsEn4#K|Or zp@T5=6yI}C_pjd$Q&ZDnXqtTg2jnb6Xq=lJjU(sNQ|Ns3PAB2}16a^qKxLAB7$EGN zMMw4U*%Ao|F0J$?7zCk68~MA*Y2ux(b8^&g^-`VP@9 zow#9gGR^((=E#gq4PqYvlnFZhKnNJ|by}s-+XDH}al$LF)2@5&Jp+E39-wpOzs*54 zDkqsFOCA9tHd5^_Jd3H5=yCzLEvsVAFLo@ZK7tec+o+{j!t2G<3v`7R{lBylIEF6cQ5!kXfZ zB)R}wVZ_k$D&tx*pP%QkU6{2dVw70G$8(uG@B6jG?%ffEi>e=^mzg@zarBj7P@Uj) zx`;OUTJwjUl_2pb>1Rh1omq3HE2OkqKM+Ym*!@1pk1Yzd1^!v=c@jL6CcUk@ll!WY zo`(Le?4ua?+zhzP_O&T~-$1`vTali|3AjT&7}&Cf4u{8)SH z;n5G|@Z+SmsClVkySQqcW6{mJ+f%w)QeR?WAPr1WC=oF^i!vnWO0`2#J$Ovrs6)?l zt2d2KJBY_ZL0@~YPlp=PFOx%DbcV@WC6e*WyP$M{Y9FlL;MFVPWZ?tYO$t~}mLj`j z7$@dSAn2zcaFh1iGMpHDftc*9i2_GRV^2R??b;ZODr| z;vUIM0RY;NT|rY1$>iAI(uL9qFfV8B(z!4=HyQ3O5Ed&gaVRlnc(p_1ah9rayQ 
z``-%e1@eY$%rnI_aY3%f&&>0(vN48)5H6AIS%tBlCzH#H>V2d6)P59L#N7_81LbVmE?XF>-F9{ABF$yzy5oKnMMGOF!ZDin38f#UhOHs z$R^xq(AbGUFg-{?-aq*J-wNOTkKZF?dJyt6V+^7|Sb5FJ*yS+9+N^GU5H3Ib7+nRZ zjfZ!>|F?0}@aXhJWI5Ls7h^89l0E;( zl?WIX{>^s~Z2yOBWVjCbi^}q~_dn)75@G4%dngmav9RKUy%XV$AH2@96EdR3O>Te@ z!7Hn@z$2d|-MN@0{-vXiy-Z~(}% zy&$_!yDz{V|L$QuPuAB_3b>TW%n3j;fs(f93^mCgj14SAj{d0$dgS(#!nA=QPt(3G z18D6+p(Qvs`|CKlOdGSmX%6bA&i)PXrw_IyKZJvGuoWr*mJCXC7yxP(nti?VfGkWE z3>q>72y2pC?7EwVQ70uqHmHj7D{ia;CPu^X(_aqDoa^MwJW8$*c1aN^(^BYI77r!e zJungW7w$$7u!RzAqipfQLY{VH8tpw4_d2pOhC@*ma3c5WnH*=w=t70?-!YCCM=pMG z)W~N=HYTTOm4g$8P^Lx*VZ%6d@;-G76dK^&N^VXv*Xsn->n?*!3E$!Ll6ULq)5Z<} z>R>UH7q~bnJqh%nqr^Q^<{xTwG|rxH3%OE67|cT)%ujO$gut9k5J8Dtv@*}dY%Cmq zSqpwgfun3IR0LggXrlzHu9S5EtYn-!4L`EfIaufp3Lt9~J6h;tM}WbeSs2xW=p;4n z(R+IBEIi&%0ha)L|HeIlx1q;&*g)Sq=5`0C$hr+f;HEN|-eCc%xsmF2$%+`kE!zQL$>^5F zWPkam|5tQ^EpkfHma2Uf`M6ww+?tG+*i&g0w#sB4h{vTjl zIDhCV`-NNN;^e~))_ry3V*n&9DuD0gXb}e*piQ*k!8M!<^lmp!mFn5N$AjJ5tUtaD zfFFeqC?pku`?X45qid@{i@CL18eo|^6g1m&rsM3i(L$G1xo3wUI8?2MQHgB&LmMt+6JB+Y=*4|_c`5fhCRC4bdh5pN8lm} zrUTw0_q`7wCMZm^SK7HAoFhZ-fn2`=Z^`8E>0o%RQSelF;^JhhesmUDFb9 zLd7lqj4OLBBi1^BMyTV|HqLPhUD?3VFy%0^s=ki5#PfuHGG)om5@ioMdd~G31whqU zzwJd*>F9G^M(<=G5lr7gT+rhHF=Sk}wSetwLJpZ)xxGfTfU~ZH(M0#!rKEyfdY&X2 zi^mW{$bd|(J$HF%Yy`T6{SpL=h3q~h@n|OzC*WBqDEKXqzur%td5zx{VEx_yZm^8_ zB&P4kz@kUgL*~MJRwc`1MeU$vl=1}9Md!QD`fTIqjY#kjjp}G&Yql3bxJ+c^UTrn# zlvs-e6X<3;#9#*aYK2=r`@494UF!tSS{^;CtSe7RYIa)t%kkbyJVzTxCe=mM7I;)( zpCc)XtWD7p)?pk6hYY~s7&UPSdWVM@8Q)7}neo}k^SLPsTzQ^4L`Dbe?fLFCt&n!5 zeVp6O2>I!(%aPoS<0!dW7fg_(6wRy!qmbVe(tw!q9Qwgsf~6a>{jQA0H0uFaHF^ zFH;F!ZWS3q9YdR}4Hsmw2$``)Hk)@1VuN&cO?Jevjmp~t#KHl%6cX0m_2oM&3UxeG zg!mLZ59`Wx9r9ssy;I(xQ0QT6$T|rvr%@;x)!q!CyS5Q&|2#h{GG$B#O0X*+&(eH3 zfp=)g;s6kV>g2U_L+BX{8w+dUbF`#N!tfpLGHnQ@(Az&6!*L{_#C*UvUwR8L0-MkM z>;w2K+}r#U562c~=O3Bp5S7C11;T+jmbIA&y)YHaCj_GAB8*4#?fsg9l*}W>+O18-gQuR=nB|Q})ZI@P?PHl+`fb`sm}3 zT!rmbA+cwN9NI2CUT2=KqbRp1j_jw<%xFXjV8k+^G>}_Zw)W|EIHlTN@jPZajg_`( zn7%RAhE84HFm7$0ENR3aJhVTE2gWaOnXf zh^ZRh-3$8wnMu+WY{%!Mp#slj=yK=Qtq9Ef$XOhtpXo83TkfyEK!PUe3NUEn!xLfU zrI$hj+2{B$tOXOg1S!M-oPL0+#)>yyufFgX)StW};+S_;OYgh8xwzPkrULnlCklx zakw6auRaFAK$$bDvc0<&nRm~i`GSV7O+Rq5Q6+;>w|x$}h zX`!yUhi%SG4c5|}QXLc96PYH{fs=^2;_R#Q*feCsrt_Vi&%zO`SXX%Jx4#hj4d>zwsUmfhBhOyIUXVn#scU} z>Xh7Ex_mGRt4&lfUm{1>kS$JGmr)(+IAV)6qL)sSv-mFNDWhwgvYCTHm1#>bl`uoD zwRX|sD*x^PeOIx4=KD^}jzm~c1Dwx-_6>U?NIrt~8^TG{nJ7T;sEZsjF)%O~ zwlIx9{qf6?N+tBCk-K*R=&*_yV_EER^r8Cy{MA7~>1gal@AGDPzW_>Ba1XFwV_iB} zuWd%od#@*)2MY?504nv6tZQeQv%vatoB$W-De}H8it7LfZy)aROtPosoSHI$d;W{}$lKG$gL?23=)jTp_!Bq!{$g281obryCYLtN?ALSlkYLOus z>0=6GGvx4FWbP0Tj$+Iw6|C#*sSqQ{#P75#EM&w3HROvV86dN&9hm?hoE3=yY&p}0 zD(o#AAvNzaU)Xiv{~~ac6cE70s5F;t;jlz^9VfC~#)kPXUT@FgVr8|!I)tpTp!(3%5bQzxa?QApn6AO^wa-Q#T}QEvj=6suoilY%$4IQ? 
z{zwYMdZ4ol@jjoGHDQk>&^#A9ES?FSYR{*keMv$K3VGV-g!=EuWOIQg&X5rp^NWp^ zdv7VQN|ro$yGEptj||SU?KAs{u<=XuE3#38P|;~eUIe`HGx%N^G|yf@&dqTa!Fkx0 zRL5A10;wBnoO5d=babrqIibenFaGr3&;^J6=KP>lC}_lq^YgJ^zDD-ebfG%>Gqz0Q z*~FB}tNFQc!XMA<{uwE7va096ECol9=aBtm24i=X%KcNk%g#ZRJOI-EyJM4+v^_c( z?tkzRtPVIXda)c2Sn7Ju3D;glf)lo45 z_Dt~CIOC`Be%V+S0pa~e|K)!bRsme*`R)Rk#%Iq13^;TsMZtEHhnWC$eem;_!}ijH zcp&toJ$$DufLhkv8d=-eXO9D7;a*{;nUO8;bdzK(wub!8Ivla_*j z0+bFRIztmpHnR@U@WJ)DB@+2gqgV2Dta2 zl+t{TRg^<0&o6!Ro8iTm-ituZ_E+5i?pp0S<7*4_$n6V@H*SW#1-6)jnw^=T zI(jfnFyU!!bv3-cx)Q6H_o$>U0I)h~&6T9b=n*W{9uuJk3DxfqLbd41DrUoI_HS0< zx_4QWQu&ZW&yyU|zG{FFydmLH3cB`Ij=~@PpZ}iyb!4P)N;d|9ms#OZ=#>k4l_@(r zyz+}z!gJ3*6JB}uoiK6!kyunRGS7}RV!wdiN{~(lc|++yLxSEJPw)bO2H~aGocfK#p7l6`G2nVcM9%0th+>VeGQqIgZX|7r2`K(%n5AXhtQ0LIE6MNvxMWPn|YYQC*Q|7SjqajJBrJ zk3C}F+bG_WUWhK4y~*&i4glIdqiKKppZ|-HBES3n_Xu0EkR|kFvTCOeL80eb zkbz@^aj_7q0n2H@NP+(6%C%9hGC%iYa-7#RJ;7@wI%L?Z6Zy*nYKq4^7kR2OEGl>n zcn@rjP8Sn}A|c=eFRS*=FSc`mDH>^wylga#uzPHO=q8Z@0lz@B4JlwWsfLkB;v`sA zU3U->#@*byI>SUHBg_RhkJnas0ww>rhdP2i++UeJcNqmI>T;((*`L#+(}W4P;H?80 z^2{dKXDkCBzK8bL(40TeLQ=snrM3XXP*^%abr;AF-^O&rNwCwcpv z`xM2v)=0P?X=)M%h8UAg?sJ87tnn)kBZs7+A(y7j*?y^qkg>q#)FMFSK!>Uy=COdi z>jN~~UC1F9c3{YlvS*MbiBvzk!Z~7Jy7*RS-JJ0jz~>keVidi8f?hBkA%_!W>$4P} z>j0-Bx%MYS%Z4ZYT%1oFN+Ykw5EujOhfd;YL2+fCwk#|g&d==Xq`iqlRghh=gQ<@KPFYonCnp>#D-N{s-psEy zhqr_-;&$uqivNrbD-B`E-ARGuYPmp(weKwc9jFVwb?EL(viO|uGlKT~q zO4!*CSsu|mWPz=pXNrL!&NPc2@QFFv;vK{Pc_P%dY4n`A*XmUFO}5xP;K&MtYb@?p z4$aNg`SCnB5UMKp<9mzGqhrQ(WO!{&XhB=cd}g4_qo4Rh*j!n|v3B4~5rU&Ty#K@h zFPyvjM0n!)FNa;yaC8F8%qHkFqR3vKkTM{XEQmF&$@P)3H{DP2$LNUp)ampf-JeKx z!CCT*=Qx8Ozw#=P)G1o~T|md6li0Q$n14ZTFVjLL0PRzJ@sX>M+$=y0spE!_P1nn5 zuGSqCsOu<-UvbO_klh~Szm+h(g!QBASRzBD;{9K|s+v1v6piLm6~zmML|>di>&A>=NNCnVi$um$Vb*wzqhfA`PmTQA~UYMjw`!@T$cU zhFNUqVs5fV_ZB?;Q ztG^~xKRFA?#enpoH(>o8gwRDHM{aB|EG{pGv5OCdi}V^?fqh$NuZk%^ZTT3lw-$C* z4#PRXa}vf%Pfl=Q`=wkj3WWQt@{H->Y?$nspooTCRG6f0a=J{X;c*BNmcj@*ZZ-0H zGo&5#!?OPHuU-Od!w6s$EOO}LAS#D3bViB-G9M3c46^`k1TaMy(mS;0diJR=gqurC z;R54I$3|eXHv7ZM62)Qw4|7lJD3SyoYYGJ+OQHNCEn${~`}Jd#2M5v~w)+55$06F2 zFv}VqEz%x~!4<<(vnWY=+)~siFxlTZh`DQ*KJ_r;gzrbjV)^bO4!|KfY^yjiiSQa= z+%$SLfBqLy3EX|}HjG<=&^bju?=Fx6G8*20>6fAMABie(Z=C?T z|L}i*mwak48KRScA%1XoB`ke(7dfsnWCY`C0T&EP|07pIYIZtQc((k*IC_KRy9s0n z2R-(UuZ7qC=I^*C98Op*&tDq@Uk7mH=H?KU6Q;(k@F6|n%D2CQ;>9U|VJY(SE03a_ z2=xsBcyHXi&04^qo<9fcPaYnME{>vxg|1?}iz^Q}qm&BF&>wPpkDN_dclJ7eVJa5% zwJC6E87*P)?LVt$fBEsN;qK~sjF9wC^oQH`DX@Z#8|QaNtYr^OM~Xe&+FK73w2c$U zfzU!f4Q-nMvMTK90J+-Z7_dCb*+R|s@p`OuKdp?04MM`RbX>^-He`cT>L;ulEt}&+ z9u0&nh2EF#2(vsu20<%hFuP$pPH}eBH|it7qj}M7Ky|C`G{982@YtpBA`CWS8l5G_ z&^F2ClSwvRsSE(F@%pI$IL$>|f44MAbU`^fqrQzfE(8ZP@M zpMeW~5Of)@k;Rm~LsTN`WQv0&lpF z^o)d9a5zj3xGYX=jYd}nLA=*avK}SmDn_;VUWScDLCwNm0ZdG35Zq`(%srNccaoT) z@>6u3|Jf-+K&peUlY4~GvgvZ5&uSVB=$I>e&)7GvA%SCyHw#OZ3Bj_3xb_dW zg_T`dvN*+Zi!xIVY_Rg4mjQAa+yU6k)Bvfp>@pq0v@QF2%hC_$MZ^s)ea;np3ZIYZ=a zQxt|QsN7E3#|$m#Dgb0jsklzc86Yr$Jfr}vMf7$jzdHd;HAv^_H3Efh-$S?Q=ycIe zs|#@B{_1chI5)j~j?5!8C-+E!75m%4nORu455dFP;pb@V2;3@QgHG7C1p4#<*=w+F zan!Z)#%G%1vqrCbnRY)nk|9JPnne2y?b~^&$i4|gb$kW8{Db`?T?6}KcZn7@u*Pd- zRe2fv1u=(gq+#>DZ}X>Z2bTa{nTi4o(3^|k(ex?7Rs_#De0DD>VskB;i-Qt)7GGXP zw2H~6&d76+Hrox zL^|d6tF;4_;4!k)9w=U?tQ%!n_q6Al#*8yQLsvImE ztEXV1Y{m8b^UsC1fAsxWX>R{G%r?dwCFH$f983x~+-CKuRej#4g-VfumVM-2a|rJN z$?}p{j2fH1aqW7@%LpU1eXhkWy?ObZfpe2#75tdOuo_yokVnJhq0n&4 zCS-kPo=3){d<5$;I?3s@We`?H@0vAe@Z7$SrQ{0_2yangLr!DMCIz_6XOl=JyfEF;bC%1Q`Iu%JN3&VdX63}V7w7D0G>{r73Zg%Kc}|Y=JCqY zp9=egraz)}TrNcj5C`8deypTMPGga*kuhXBiQcvKk$pc8?9V$4+g1g9axWHVSRwAX zP(fQ>#!Uti@TT!nU)XZ5*06QpS{sAUD(vNP7R&Wng5t zs}l<1)iIdy 
zt~mR$(Dq%GnK4}IK{fR)<{k6wM!bMqFimHG+zvg0U76C@FPSSH1CF#2@rd&|l?o&S z;IapBY%f{WsBCkKvoHIZCHybZ(TO2+LRc5Z@>F2#_nc)5xFjCTiL@!dVdE1VZW%+F zYYnG4_vYPdPn`BvAgbnlBNW&?m__zLyJXiJ*P#SJ^qMieioHm5+8Kz*1yPJ%NP`}F z>9RQdo=6gMV{UG=13Z;29#f<)93MJP+7^MxKatO2bWjgQy~t;ji6k2OvxStcuuA;9 zWLr)2P($YfIk0YJd+UxEke=xjE^@!_ttm1NkkCGv3KRP$QzYth3shoc0AQjsB!F@U z9GGB*ybhyZl1D}tY^{|dWhSP!?QR3Y2P62i4WavFdZ9g`EgbTGQhD$Jj$@PPUd{AL z)-MN{)CP3QQl7w?+m5V?jmz@8ZsgarE~5PJ=bg_0x8X=eGdNXG-F9jZRrKr4z~!4CLPdPId<{RIbC}9$Q4;LuA0u zHq@^}WO|PF#@ZuOlk6yDil7+BLOVMdSQuA>6e;tZanhdWoS&CSb&4IOI3Mx)U;}NM zjI-d@YbJaxxTwlGIYsYTv?W0n+rCDJ3g;R&*7ffY^Y9*)i* zR0njROZL#~MWO+wU1T9qMDR&$KndM%TQ>`t{e6qQZXL1*bqm~XRu=fn;Lat zOm$*?5n{Ce@I(KTKhQUqs*mr~Tjgi5xB_G4MhJ}Ay|drz_M(7PxG7fUl*8P`Axy#k zSVR@m82B|GcP6A<%YJwsEW=yeD+?(&Z1k-gH@Gg|0N2%fH<#9HnnU7e#*hb_Gw*fxYi=33WboK)(ubqnvqssSER#B7?L z9g6_FhJc{h8+ht*TrruX-3YgHh`A+r)$ zd-isO+{}CT*1{XVc$)%0auhLw#RFQgVazUFeu(xxBOy14ki&{4VY-Vvmx5)uvx4zF zM$yO^B=Cyc00%>sorEm2l;|La% zqO4nQ-b0@B&`|{Zhm`PIZk+p9jx?$!`3rX4Ta}Imo|m%Tm`@@UyPF%~>1RF{?%i2H z@pG7WVJ-2R^(qg@Q@gjg5w`IzRhEZxyBk{@4KNC|Nrb(_J85@W>bD#Zk3<>wwTI#X=>X%|5&d_UB}GD=fbIMmSu!OK2Vtg0b1Ve~3mXsC8KA2>%2bc1u#$(c%0;A{SywFo(`T}Kyw;w#UEcPRckz+eweKNPBKOE{c> z9F(`iNn~$iP?6$zUfC*~RCOad_*a1+K}HSz)8u?B=NiTopK)YPI>5zAj{=w$`5jes zpzWfnuwUl1J3UfHNRVrO-T=<1g=RX3kYk`1O9n3E}(5v&R(mANu!+K{OIx6Q#9 zm>5;Eg{9H63IM>6vSC(@jA2*rHRdH^%sqRuy@E(tKYM5z@)c-0q@Zr$kI$-p=D7vv z0&rWOsUz*{>w8K#q2>f@YBZjLSI@iO3%sIH;|>f%`}ykg0*0MSa1Q_;Y?olfP_JQJ zQ&wV6VXo=5v<<$_2&B)a4Y0^57P)X0_f>|uc#~5A063CKL_t(8KQMw0f_T6lofXST zPXT)QvU7pOQmUmY&lNjXa9xxL*TQiCSgVp;uL}2RzMn ztVJ$lgU?hqMn;fl>I3i|Q;8qjg#i)t;7rMm+x^CL7$@lo zGOIXL@q6~&HE^B#%o)cfn!_8x7V;2tKiLzV9hpmqNf?#uU`@^#ZHWa59R_^>f^_sx za9RaFf`4u1#>z^x!S0*RpwDeuhitNw+&f;HBp#(Al*2MvDSK~0EhQ;zp8a}DjKomz~1STrUf$E%_|Yu_zcXT^xDSR4vK z<@IIGy?Mc&i+C21U%{)Np=0CTIiZk;kMbDhQcS{iY!{ov0aULFj``pS5>3(J^6@?H zzwJQ9Geu%FBGGcTEhl{ys(W2X}g~qg74vNU@2@G-Gtps zL}bnD0caKaT6PgKixs>qtQ{N9`s}i~7%y*xAM;w*7-i41OQ{Qb4auwI${11=v{mG8 zno7)qhM4Qi0nU-nrH91qAgb4o!VJvKmw)Tq;eCpfwpP};Ac6y$c0ssh45SMOz~#Z6 zR24mFP`?ET3hyxmJmwiiHku8wS7HyKP;aGLZw`YIkIb?tASIq99jZuakMAkPdbT#y^)?Yx^ z#UW$yQGQ%cLE(E(`zhXYoA7CZTrXnQ2u)P&Y&71CX$rE*{O5SsHUjkH7hVp(@%8_f zqAx?T{EYRL5wOp!2ku~>bb_gT*s-TEN_M~YAUyf(6X6Fx`dK(w{Fr-egik#DP?(`T z)&P!%qYxW(^nhmx`x;7~LvY(7G|Q@UzA);BN&6{UVhuX*M9V0z4wMThJoNL7|9??u z8Y5?Ro#$KK)m>G+vG;EFLJpVVa2pP1hMdiuOf=Grgn=XJMy!C1x5_n7NW(GC68Pi= zLla|E>JFj&aLicS2P@0&FPw_bbn4v6^q+p`cZtfia{$6YBF#k8xkg~Og0k-)9FC|= zAM>z!XMyW0fQ!YZl1Gj`<;@<_x35e?Sr{UT~w$9cTm((lF!8B5yT{^|se2relq9p5aPUJh21Fp;g@I zm`5-pe`Opwk65naksJMDaB1%ScM(M#TlQx!pRL@yO|+?*R<O^*Ia<7vnm=dsQc;&`-oxR8r!tg5jdE@6l zOp_OWo#--qr;p0qo*@e1I4JhbpG6mkDtcmr`*)8XPP_N6a7^-z^w{$+rpo|iGM+6#B)pn{F38tLN-{l2nBf}mSOWj1@S zjp$4p`pNxo5mIIsgO9<>qD!mvI~Sw~G?8dz8@)A_U0-D~)XQ zJB7<>0f^C*0@oRsFgOx?SM?VCpt4?h2DUlTI$Md&dDcG04vTwuA5tcW@xx7bWJ_`&%4j3pcPII+?wYLyz&ZHqU}J zBdo5oHf0lfLJziTX>l5bMh23i-XdA3Dp6f+j0Z{Bd*0|;5eIym$iIbY8M2q{aKznD zt;mWgVVybWeev#0YK$LbOX5&4@9Nee4v$&K$-O_f5MViqy$FbSa-#s*K#VqNkI0=m zc*j(`g@M>5k1RGzTtuG;(sdkMSDlkQ@}+DT<*TxmTj*U+{4*okz|OVf)Os*P6}#R+ z1ifc~wu3xZz;DM0yU5rNHVTuGb(945kN0JM3?}J32`Fpa%bu~$n;FA>)AD@3xn}N5 ziX54HJPfY#817yIH55G}yUM}>ofD#E7uZAj?8bT3+IV1(R4O^B9-P%c&crg^JZD3! 
zf(&JF#4Xy|1B|z!7g$o(7dZq2@pWVjxj?=Qq*NGZ3&2-DKt6Z`5@Qtrcz|Q??Wu+a z2nBI9gv4yPrp(B3!D2XdGxFB%$(YRXbfTlDCD%C-qIOr`ryD_#>pF7c;W&y@4_?s0!(!liml_3&zYMLbu+E%$B#lz} z`pduho9LLJN2Fplavar+2n!O!=0@r9xtlcuUvOf4lxjjYv{7Q7vCXq=gQoCH+}Has zs;tm-k4~n(<44lylTW7+qP>$Bp5nBxLu|0!w2JbOBLY27DF9%J?U*+WH`TQ@oQJk% zNSkLP#3<^5o}>z!F@|T$rOh4S++$eyp6XX2*F!BZz6^`c17p5~b}H8z3Iw;A2a>XJ zX9FaH#P97$9c+^Jz4Y0q08YDUW_p(ShNi;C1n;B7X_^kl%yZ{GBx8X{?gxM-!|&^> z%jvo2pG+_C_s~Q)Jl{^DE$eCN{!$u0JVsgx<%V)8Gp>@x1j<64z0pEJpV7cU49x%l z^aLFk2n44IT9X|jDu0Toz$y5^7miZMz#OdukQiJ##{8CNche8Q^HXRT^ogXJyt#dO zQ=%l&Lp0Jx6m))?lFHAlzly;)lLZkf4-XTO_ofWTM&vqWdXdKQPe1i+x;r;RrwI5z zT(?LEgmuOGWVgRyWL9cy{zjd#-2haUy6Y@2A! z<=5ZDnX+=7wIfnO z9CVLQr0zq9)9{Jo2ogq@NbU;LZ)PLb|4pShK%)Wo< z`Il19xyRGUBPY_r#WlCZm1T6w^BW%*{I?#1G$KJmS!^@P1{^c>egPBMS zDDPo(*8v+9Y^AG@#e)K&=t0Y~1^()79RZ7eE!43B+%&yloX1-l0hQIGVTs%Vz(JX) zXd>pUrRyz7v9Q%O);b%l_jEi{*#b?UFVGV3+OIdSO=9hO2I$5>k(dR67Kn|Ej>Ulo zIwo;Y0CQ`UQn2r56?B?wsf{t~sk7y{y`S$Z2n*R_hS%7`o?IsX+N^?6%py*YhwAu# z#Z_AUv1hxm&jKZ7xC1V;Yh>MkvY@@Touu*zH6#!Lf zDAKibp!}nwUBDp_Qw#vEJ+fZFEp5q+fK(<1%KS+4vAC7_V=fJdNC~hNsy%y$MuwS7 zZ=<6MRfys(>5dr!BW0+D7C1IJ+v+u*&3~00agaTPwN5(tf>4Bj+0=?5Dy&xpS$6$& zbY;jZQ#!Ibc*?Js$>Ky_D()FQiIE|%^>?;=bsl_f3-9C_w=-_{h;T@q;YoZhSHvEFE9fUldHn_}RaVL5j~74oe9=V{hi> z36ahZ8DsQL9BRYEqx%g2+YIwQwyq7w6gNHeg8MwM8>5sT-%Bv$f-RE8~qbgk_ z<^%g-3px}4eDiMB9A_5Yqm$zdkTmxyx71Sa8ubLj3J?*@Pc{$9g1J&p{eoy zB9aMEuHn6Ql8!XZ%w!?-0y-UgQiFJ9p4=1FsVHWPeV9mEzLD0z;N#mfU-|T-@BK7g z`qG8eO==vfH^mKjR?KaI>h28y)BpB6S@o%^f*SR8P;{rGvpR(N=zLBek{9VCGTWal|#oOXV;MZ&zNX z2Fys%EC1}vX$2W7ZLg)7>({7&J(td2dLiAoe**FJJgrU9dTnh! zJ^a#z)ZaRpW?uU{gBYAnH;GcIr%x__82uJ4?9@Pak-?BN1@SymYuw$*Dk}i=OVy>J%9`7FzQd!oxlAtdLpG+KuLzZ91bkBiazvJ zE9tzYAN^mPH1rtUw8joRe)L}-J&^DH>?cHNh|)2US(Lmoh(f}MQ^ZL0w!BKKJw9r| zSgB|1&e29h4|S3n1^Oz}sMUfSBPK3;h>3iQs>U6hx6>1 zGDL6qR~~Q{SupD79&r!YK2QE?F9#+>y#+eX_G9a0cLWv%{(AU8qzKV*8d*WD7O4OL z3aS7y5e38mr6!FH{mod+SP5e4fKVIB2dL#UpYI=2kDc7ZKbu5C30$%N;{*|p&js@ zEgb70leGrFIg3G-ZlHaV;^91(qtFqZ&b#Dzo(Bw=VAJuD0&oD`&`I#bjzZ42+KkVW*o=DX^t;ZsX5!Ujel+<~v5Lrp0Fwm?J3LG_qQn}Pm=Aji2e@KJ zjJE2GdR(>@^<{zq6%3_3QKN~@e>d~Ew!9wpt4Jr3GVf#m*b3RbGFgDJV<&hHfDN7B zg<~yuG7f-1KDG?Jus8R}K(9 z2r>ITiV?9bn}BpV?|nGC+5)p4J=hzg>g_lo(giaBQiEo@r4{BGIu5udvKidjUc(#g z7c<#e_JIYEWq@Yi5UtYyc6J|E2ThXmSYl^+eK)6n4XIlq82F5k$o+iM@aRKyplZ{y?BWEVW4n+25w^gkj z4tlWpwGF@{pKpO3%xJo<2HxDmWA>h% zM+X|#-E6k|KWkQoe;=T}#NdUdT*u_#*eH7myvSPXL~qjZ$pa*;h0)1gA^5UK042k^ zx*(JJQ@jgZdWbli3Gi5E*V3+BIRXs0gU%hBj{gY0yolZJtMCv1&0mzvz8r)YF-n>N zinQJEPJCJK2!m&&NN-GzoZ!dbhPTOv^w!cBLZJW}t%_Gc$>Y((vXH^EthZEZ+~1AY zi`QhTxRnTIH?1w!Nabjt+fdx>inSXSN4Mqfq=k3yttnWaMle`ww)8~$p zCP!&QZCIYA%@%^=fnG;O0DbUw=ia$OUvM@dntS@AJ1I~4)JT6j3blgMQbSRn92!W& zD7gN7H$_l9pN(6t5UI)@NN@i28|k}$^cN^kfLQNvMBzPEQs&XLAO5mrv6tk|3L>Xj?$BC-ReIo-MTMp~ngaG%t83raX- zmd(_`+_wYpI$57y+Elbd7wEvjE7PGsAh(86@S&aDQ!i7HP^D##d_TEHQ@3WqaXN~U zU>9&QTz5J`cZ9d!{zV#=!%LK~s}I6HC#J0dtor~KD>L^25%Lv{Xs|(<17b52eDsml zG0VY$5hzw;eXB%b3MkYfN*SxcSZPDl4}TXK+h^~L96pMhg+jro*OC7`MI-jI_C&Vs zVSuHd5jovsjaJv+$HEJykU?Yyp$H(-fg)<~UeXafRjLEyTiIL+nG@;tqfk63V(Z3- zRDfgLS%R!yHFDKX+qeVr{<)`(<#QO2=z~l2bARp@^ohl^Pn$ovc3X_a0=|6!Zv*A# z@uVG{L=I43JFMx0rFn|a0MT&jGZ;%LGc{WOHFEH9S=$09oyoQzE}(Arb3myXkpXz%>RJ2Y1{lqo38U~3%94Cq^^cJ<0i+o? 
ztDxPJ0>$9kqQ|Tj-$HiH7{#VUR*m#En6G9K0N}#<*iYD>O+}hvvk=P{kiHX{HTJMa zluEUbM=3Qx&};O_XseuK_nMy>0CD_Lq{H)?5obX?4hi$&IE`-cxBv@Y)65zO(0@he zU>=P^$|JVpfjY&1=NN~2$>-XlQS%}27fBd}EfO4wC@Nsg;wC|X!4v5pBN$)X7-p&c z@ECIE!6^JT8G>%;UXH(&`{Z%>Ja$&9ij)b}R%GvR8cRG|=UmRZztd)oPQv3~S_O>Y zVDf$*Na2a6W&o#d+~F|6xd7|R4HtMb#xJv)b-dV zb7h3lz|f*p26PO|AH_qqu!`coEQNB&kLmqwfL#=(BU@pG{e7PprGKu&o!_ zU7h2;yleDE24Jl$a8L~q!ft9ybn=vK4-{F0dv2W4)^lZaoBe-vRs_B?_h(oyX&&;j zX*0>XNy||FC~Z{Ez0ZxsHIJC2kT*-kR0w!+KA?n4^JjD4KLugc-E7~bf5&l;fS&a!wao9_nxZMTB0a)kHe^p@@LSaOaj3a<^?(4Vr}F&!{CZ6 ziTc;}v1n4H2j`plH>)E3%h+s=8X|-nd7_0L?_q}#kpv$iseIX;B1i>Zd3jTa5iW&V zNE{qHzT<=Z<79iS+!Q@6H=MumE2dFGnWOMcBe=BXm86&!{q*C(?z_pE;FY`tldk_y71$c@-;2jS8T+*eH=E zU~C?wpps$05m_3iA7~ZOK(G{VTM!CSohs-K4642C$`vMwg_^y7J)OQlVN5r@@eZD1 z!fSY+eW?@RyhcU(=F~kRSTx_qFvNWFUT$DkEWlO}=LM^HdwN!2RA<0RV4_UFGhTLU zoN-U&$9}_m@FhJ+qOLdW`Y=CEyCABS=Qn5#Rb`Xes2Rw@cX>7<*Uqz1J=_`|G~-)d zTuV2v&87xt_-RmExc{A7x6-*snJ@a(+QU}b#S)Rqe!TK^B3Z6u8LGm)+573-@nh-s z{8T!B_Ds4lHOuKgGpT~oSeu>Ux8znRP6NN#JgL#wEpcB=`p%fpycR4-v%nPK(nD;yf0vsME@~sEV z^?)`|C_oG^at!XBa?5{qh-&QOfXGSTV}s=Ygu8ri7)2t@r=b(ZCf~v9x227KVE365 z?6rLQlkfa5#)eTRygxd@@sdPr{_;=0OM4=YMTF;R%cVA)k0J{7`!{c+l=HNoo24z> zgOno;e4Et!C{f+5rM2|V|Ncw7ZjW^yOMm-6|4&dPI^k)KV)U>4`Oo6W;{lZDgIl-L zy=&iw7BEg4KM_g2#+YbHHM11s?g79~T)mO@d8gJL zqFQ85=H7ZE^o0?gUQW!*ojji?5oF(Bc;S}u%hY02ZJllfhbAd3L_v}2>gwmDxutvQ z;A4-cb)p)qDvH%C2EcxCCXGJz*_4|&oMvb5p|AmK3)4itZ>1wRVA~Xx?NcS+LBSeR zfTITJfUDksH{5gjWSS?+UlcDu(!i=RC|+wbnhgl@7}SA5 z-k1CA!+~tdC9}Z$x8NArZfP6CV=9-y#F6By*Z)BeXiU`+W{UC!-kIx6L29gweh6SR z#_HPWb&G6;lPvgS{~M)>ng%}CDUs4(x>8+hNP7@C)Y0r6ol5H<9FKfY+NEGu z=kozQb}eL6e+V`_;Yx?x=$`;(ouZup6$9uCjIt?uL9z4+mu!#sX~FJPutOFe8R$?} z#D|~^<^=G`@03mTZqQzsqvFbH+F0aZ1Aw&Px@&LsCx*CI8+T18! z!#*o4C9?|5DRssKm3Sw{w9VXUpyaJ?Kug#TTgVr97Y~x?V$S4h?(=0G45hAut`y*# ziBz`jcF|zGJJ`M|rw#fZ`_PX(xy-^j^~NrGL@cpeB!j{;qF+9awG-L{FW2Bmd7o^R zAdX1xUMmr4#+G9bY*qkT$`3kRhe|+eKu-BDarld;tg5^PdkAr?lf9Vtm@}jP>hUag zO>>jyx{yikU2$$1Q!D?EFyQ=?!e?(4*%K^IL^<8tw#_V4Hxxyv=#f3zdbJ>*k(ppE z&|F*#ErWNJD=o!R4z=8C4{p&e9y~IUW^UY~vqEp|Gc#pt?DHPxw+tZEnedon)BOf$ ztiP%Os-^y9s_0I^KQ^nCxfBqq19Wss>%1SJke$JGIOU9QpFp06Q0-uo+t}x%AOOeA zr+b0DVQV+%OMGbmde=V#*sStd6xFdm1lo2?kt^Lzz^jN3wQZk!O$wfU%*|BGqc;oU z_@xT!p!H(+ItWmC+G&SbCb9vtH@DF#o&;yT)9Bb_09$EpX0Y5_4c5RqGeNV8z_~LM zq2t|}hL8Qe%7Ix%2vHI&zZ*xi9MbJ2et@^)Ckilc$a@NOl~O%7HWgnn>X_#ro><~0 zQ<=$|6_~hjdq%b1zu<;!oP9tW6}wn1jRMP_W`85w{xao z@m>P1P)G=rO{7&eRX#InP=gOtXYTX-+f+DqmkojuL8;|gw049jdVR7PG_Dp(XqYm9 zM?@I~2-xCpE6Q`9=%mq13Fs=fI^(9+7A2fd{qS&C;2ZaF|7RXQ58s+%1UA$+;NDo0bV$HthP zIGCv4O(#Z1(_y&HpM&Gsn8)@6uk>M%U#~mT28#^n8z~LY88r37Xvx z5=HeyFXhypxt)Mm(@*#B&7{wM@k^Kt8sOP8){Qo+DDy{-r;79y7O;pR ztTJE0S)02{2Zv%hK%{z)xo&4Yd)SxTcW$O0Xhj*KbkdB>ny`CPWqF2!Jx)h7p(;NY zKnPniZOu&4itRAtLwV{*Be%m;a%Z6&baB$!9>srMlgCn-=Kp=HZy!Qaxp|$=5!_?^ z2;66k5tO1f_arg|G)zN)zsty@7v-%X*+)REM7q}vf?g9Ppijj=Qz0bO(;V9%SH z^?nwi>2z2tU@uYWLFyhU1a=Tnp9*{hrbe?oB`~<~=o`MbdWLZ%pA|daAULu-#%O0 zCh!vsIS?rX9wjb)LXNG)Gp4eA?>4SA3NFQs$%wJKCh7y_pv8zU_o$=)1uU*vg*Awi zw0Kw7$joFf&ZAt>Xkp3!8H2W07j$J z0wbe;+EQDLd95wB21Z9&OY9;15wu3m|=ry)Xa>q57iwyeMXS8+7<5Q0X7-;vmlrE zXvZ#_1+qY_LBxNTLYd(H``XN{cE5(+3JL@FVaX%XDqv?5#5n3WOlH7w1LHH>%o%~T zmLv2g6o(x$D>a_u{*2#qqfXYm#6Arg0aPmYK2r>!#A(2JtMCkiY2i=^GLdKZwE9L{ zR^!Wgs1qe*&YBRNV?l43IdWf`jcF7KII>sEZDiM2*Vre@JoC}QJDY{tVXk#BEl$)i zFOfxYEIKwkNN7YnX(RUiCP5^zn1^;iK&M|lK^+`ndr=!XH$Y>C-yoCrTg*lJW+7;?PuO#jXDVc^%C`szU5$3Fz&BrpTX<7@DrYfKnpqJ zg zWts&V@>C-_;YMN-xD|hxbN`p3yCA6hDfe;X^9jNVmyh=i2ZJ%~QZec2b-(<{>r|G1 zKKIZHp=H{Sjzee2u* zJuN^%=^q=%5Mm@y7#;AXRkX@P@bGvit2;Gv!P8^Ino1)6B@6zAD_g;GU%C%HsBW}*z2dogK 
z!C(yaVFap>MOb@VqSP@)ofsW*&ZN-bRuN4Y15oNon#w|a7?(Xa*|a>v%Iam(_dlgi zv`1Zcv3Whc%G6Q+;yg-hhf@NHNHXU}N{p8E4GpBL$i^;hhUzF;xvc|4 zXJ6SWr$RJ3OK;a50CbT)%=FetGidyQLj!QhP!K$S;^O&qm$|OK`6fJD_>i!bH{ zxtkf|`010+olhN;W9i-9*El$aLJ6kNP7o!=*p63kyRLSC>wa1dh1hfA;WTu3EUo?g z=M-+u#J-bf+yQW|S@Dm2WB`M>RK~TQviPgD#wA4ec*o2z6JR7(;&pQz* z?4pNt4y9|f1(=pvC_*#Ju=@V%TnjIbIlXTLiV0r(_=B`V1UyT6xB+l3Lw%4kF!%aT z@u%qDOeD6*ap)f1I!H8O<@XOb50#W{)y{=wskmfFLpzu+=#Jp(Pwni}{5STS@Ft!SrAVD091Lu*RxX6Q? zF(wk(HAQOMEI=V0J1ltI*(RsJ8~`7J|6PX)qGPO{R99?bc ztKsk??e2FJ0Q&#~Ab>!QhL8mt!XuV|Y;=v_fT)Vc`UVAoJu!B8GAIIT@S|0SQAmQ5 z7obZJz{Too)uq7#0gA;X>T=tDSrFuR1+;c_5M+dd$v6UF^9<#AAER#+$GxpHC`gnW zSm4^&)BM}0u$*q+O`s)*vB&Hl&+&Qg4?$rEzZb9yxYg?(TWb-sK?;kobl424Sg0q! z)DU(7!nWbgt3ONF3k_$xbRMFk&Y{t3pDk}teNxmwGovNeCt64mPAbrs-J&FGC3FrV z>d6|Kb=m_^G(bVE+CK_|nJ%JbP%jvdC!%eUp)x|K!OCNhZGC9bceHL~E(D4?89bc# z0)+EB$E4vdQ6$z6-KT*)S|Feh3l%^UyMVnbBdZ#IL8b*AiYiWi#`h!d5GiqB$Z`C3SEmi;*upR--Z3#5-4W8@%HZQGS&!Jlcu+qfJzCX`21LK^ukQuVOjbn12(ek`MIe3yx5*)s z;*4#b4&<$s1FE8cPdOoKY?rO*JcVN>xNV3laD2^dnNbn6?rjPH$lN0PJM1HMi?-N> zuw#XEh6dy4n4o{VMaIw_1j*_&i!Lk3&K}P)BNQ3^R@Rd}>?GX#);s9)Yu`)3RVyjy2RCk}u`j-o7HFkphlB%oFd149 zYy-}QPCSw}@6FLC^*F-Lf+N_i6t1+xyQ+|$sYy5B9ekH83$#Z>HAgfvM#KOJfTF@UGQa+jdOk1gW_2nFOp%8MIZ5tYlV#n6Ch=$P21!a;WB zXhoYM6%2ngj4JX-&xQJCuDHqI&5xtPXr25PsirfT;D zTx^UZJ)W~P<3B*AVr%*)h7$z`N80zw=Q(hJXI0U0UG#gba0pCo{83H`Y<8hBikG8z zX7sANFO{n%)3Ix0el;D(}A{tPHwJe)*ICvgbyjF-im1VlN5*^44uHn$e;kA z6A&awma~TyMn@QRZ0;?Ax8TIF`YeHr5fj&0SyC>HnA%TRTO!!CXvwIZ_m!ID=l4si^8F}ebn^0dQ*e9S_j8JRLbqJwKX z*TNbNujyEIhtvnbu5w$R6F%5)KUG>4+FOdZsfwy8t?D zQ)w0XcU>5Z9D7Rk;*SSr1cyC-($NCoX`opfR3F`t3s+G3k zL^CfHo`dKS^itNCXDB+`D@({O4i4X`5!jLPU<6;i;%p&6v8HyYkaJyQJ=MzvFjgwk zgB)GRC2Tq5Po2u=p3G^4*r+p;%T1VL?F(~~1Mp|H69DD{dof3GTnW7&7)71K_uZei zAX6#~nwX*9;m{tP9VsJbR9BZ4$o|lxl;1`62z(-?$n{wVk5#LT7c8p2F;lP3a*gFh l8(8S4@PGF1OdJ~W{{c6Wrc(O>fVThu002ovPDHLkV1hH~l0*Oi literal 0 HcmV?d00001 diff --git a/papers/atharva_rasane/00_myst_template/main.md b/papers/atharva_rasane/00_myst_template/main.md new file mode 100644 index 0000000000..37e985084d --- /dev/null +++ b/papers/atharva_rasane/00_myst_template/main.md @@ -0,0 +1,788 @@ +--- +# Ensure that this title is the same as the one in `myst.yml` +title: AI driven Watermarking Technique for Safeguarding Text Integrity in the Digital Age +abstract: | + The internet's growth has led to a surge in text usage. Now, with public access to generative AI models like ChatGPT/Bard, identifying the source is vital. This is crucial due to concerns about copyright infringement and plagiarism. Moreover, it's essential to differentiate AI-generated text to curb misinformation from AI model hallucinations. + + In this paper, we explore text watermarking as a potential solution. We examine various methods, focusing on plain ASCII text in English. Our goal is to investigate different techniques, including physical watermarking (e.g., UniSpaCh by Por et al.), where text is modified to hide a binary message using Unicode Spaces, and logical watermarking (e.g., word context proposed by Jalil et al.), where a watermark key is generated via a defined process. While logical watermarking is difficult to break, it is not detectable without prior knowledge of the algorithm and parameters used. Conversely, physical watermarks are easily detected but also easy to break. + + This paper presents a unique physical watermarking technique based on word substitution to address these challenges. The core idea is that AI models consistently produce the same output for the same input. 
Initially, we replaced every ith word with a "[MASK]," then used a BERT model to predict the most probable token in place of "[MASK]." The resulting text constitutes the watermarked text. To verify, we reran the algorithm on the watermarked text and compared the input and output for similarity.

  The Python implementation of the algorithm in this paper employs models from the HuggingFace Transformers library, namely "bert-base-uncased" and "distilroberta-base". The "[MASK]" placeholder was generated by splitting the input string with the `split()` function and then replacing every ith element in the list with "[MASK]". This modified list served as the input text for the BERT model, and the output corresponding to each "[MASK]" replaced it accordingly. Finally, applying the join() function to the list produces the watermarked text.

  This technique tends to generate a nearly invisible watermark, either preserving the integrity of the text or completely changing its meaning depending on how similar the text is to BERT's training data; this was observed when the algorithm was run on the story of Red Riding Hood, where the meaning was altered. However, the nature of this watermark makes it extremely difficult to break due to the black-box nature of the AI model.
---

## Introduction
The growth of the internet is primarily the spread of web pages, which in turn are written in HTML (HyperText Markup Language) and consist of large amounts of text. Almost every webpage contains text in some form, making it a popular mode of communication, whether in blogs, posts, articles, or comments. Text can be generalized as a collection of integers or ASCII/Unicode values wherein each value is mapped to a particular character.

With the majority of the internet and tools like ChatGPT and Bard being text-focused, we need to recognize the importance of identifying the source of a text, whether for copyright reasons or to differentiate AI-generated text from human-written text and prevent the flow of misinformation. The standard way of detecting AI-generated text is to use another ML classifier, which needs to be constantly retrained on the latest AI-generated text. This approach has a few drawbacks, one of which is the ever-changing nature of AI-generated text: bigger and better models producing more human-like text are being released faster than ever before. We therefore need a more standard, concrete approach, one that can be used regardless of the AI model, i.e., a method of identification that does not depend on the model generating the text. One such approach is the use of a watermark.

Watermarks are identifying patterns used to establish the origin of data. In this case, we specifically want to focus on text watermarking (watermarking of plain text). Text watermarking can broadly be classified into two types, Logical Embedding and Physical Embedding, which in turn can be classified further. In Logical Embedding, the user generates a watermark key by some logic from the input text; the input text is not altered, and the user keeps the generated watermark key to identify the text. In Physical Embedding, the user alters the input text itself to insert a message into it and later runs an algorithm to recover this message and identify the text.
In this paper, we will propose an algorithm to watermark text using BERT (Bidirectional Encoder Representations from Transformers), a model introduced by Google whose main purpose is to replace a special symbol [MASK] with the most probable word given the context.

BERT [@Atr03] (Bidirectional Encoder Representations from Transformers) is a pre-trained model introduced by Google in 2018 that has revolutionized natural language processing (NLP). At its core, BERT employs a bi-directional Transformer encoder which allows the model to understand context from both directions simultaneously, greatly enhancing its comprehension of text. BERT undergoes pre-training through two tasks: Masked Language Modeling (MLM), where certain words in a sentence are masked and the model predicts them based on surrounding words, and Next Sentence Prediction (NSP), which involves determining if one sentence logically follows another. This comprehensive training enables BERT to excel in numerous NLP applications like question answering, text classification, and named entity recognition. Given its deep understanding of context and semantics, BERT is highly relevant to text watermarking. Watermarking text involves embedding identifying patterns within the text to trace its origin, which can be critical for copyright protection and for distinguishing between AI-generated and human-written content. BERT's sophisticated handling of language makes it well suited for embedding watermarks in a way that is subtle yet robust, ensuring that the text remains natural while the watermark is detectable. This capability provides a more stable and reliable method for watermarking text, irrespective of the model generating the text, therefore offering a concrete solution amidst the evolving landscape of AI-generated content.

## Related Work
In this paper, we will discuss two text watermarking algorithms in detail before delving into the suggested watermarking technique. First, let's examine the current standards for text watermarking. Word context, developed by Jalil et al. [@Proc01], is a type of logical watermarking in which a watermark key is generated without changing the source text. UniSpaCh [@Atr01], on the other hand, modifies the text's white spaces in order to implant a binary message directly into the text.

In word context, the author selects a keyword. For the purposes of this paper, let's consider an example: say the keyword is "is" and the text is *"Pakistan is a developing country, with Islamabad is the capital of Pakistan. It is located in Asia."*. To generate the watermark, we record the length of the words preceding and following each occurrence of the chosen keyword. In this case, those words are "Pakistan" and "a", "Islamabad" and "the", and finally "It" and "located". We then append the lengths of these words one after the other, creating our watermark, which is 8-1-9-3-2-7.
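This key-generation step can be sketched in a few lines of Python. The snippet below is only our illustrative reading of the scheme, with punctuation handling deliberately simplified; it is not taken from [@Proc01].

```python
def word_context_key(text, keyword="is"):
    """Concatenate the lengths of the words immediately before and after
    each occurrence of the keyword (a sketch of the word-context scheme)."""
    words = [w.strip(".,") for w in text.split()]
    lengths = []
    for i, word in enumerate(words):
        if word == keyword and 0 < i < len(words) - 1:
            lengths += [len(words[i - 1]), len(words[i + 1])]
    return "-".join(map(str, lengths))

text = ("Pakistan is a developing country, with Islamabad is the capital "
        "of Pakistan. It is located in Asia.")
print(word_context_key(text))  # 8-1-9-3-2-7
```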
Using 2-bit categorization, UniSpaCh [@Atr04] proposes a masking strategy that creates and isolates a binary string SM (e.g., "10, 01, 00, and 11"). Every two bits are replaced with a unique space (such as a punctuation space, thin space, hair space, or six-per-em space). The created spaces are then incorporated into certain areas, including the spaces between words, sentences, lines, and paragraphs. Because of the cover text, this method ensures a high degree of invisibility, but it has a low capacity (two bits per space) and is unsuitable for applications that need to embed long secret messages into brief cover messages.

The first approach [@Proc01] is not appropriate for today's world, especially with regard to AI-generated text: we can generate new text faster and more easily than ever before, making it impractical to store a logical watermark for each one. The second approach [@Atr04] is also not appropriate because it is relatively simple to reformat the text in order to remove the watermark. We therefore require a watermarking technique that is both robust and imperceptible.

The technique presented in this paper is based on one that Lancaster [@Atr02] proposed for ChatGPT. In that method, he describes a way to generate watermarked text by replacing every fifth word from each non-overlapping 5-gram (a sequence of five consecutive words such that no sequence has overlapping words) with a word that is generated using a fixed random seed. Consider the following line, for instance: "The friendly robot greeted the visitors with a cheerful beep and a wave of its metal arms." The non-overlapping 5-grams, ignoring punctuation, will be "The friendly robot greeted the", "visitors with a cheerful beep" and "and a wave of its metal". Here we replace the words "the", "visitors" and "metal" using words generated by ChatGPT with a fixed random seed. The watermark is then checked using overlapping 5-grams, which are sequences of five consecutive words that overlap with each other except for one word. For the same example, the overlapping 5-grams will be "The friendly robot greeted the", "friendly robot greeted the visitors", "robot greeted the visitors with", and so on. The beauty of the approach is that we are using ChatGPT to watermark itself. However, this also means that we need to run two separate models of ChatGPT (for the sake of standardization, as multiple models of ChatGPT are available to users and each model may give different output for the same random seed) or run a second model on the generated text.

In this paper, we suggest using BERT, a model created for discovering missing words, to overcome this; it is a smaller and more precise substitute for ChatGPT in this role. This is not to say that BERT will necessarily produce better results than ChatGPT; rather, its bidirectional nature allows us to use context from both sides of the masked word, which increases the amount of context available and can potentially lead to better results than ChatGPT, which only uses the context of the preceding words to predict the next word. While ChatGPT-based algorithms will work best for ChatGPT-generated text, using BERT allows us to expand our horizons beyond AI-generated text to any text, regardless of its origin.


## Proposed Model
BERT-based watermarking is derived from the 5-gram approach by Lancaster [@Atr02], but here the focus is watermarking of any text in general, regardless of its origin. This paper will mainly use **bert-base-uncased**, an autoencoding language model which finds the most probable uncased English token in place of the [MASK] token.
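As a quick illustration of what this component does, the snippet below (our example, not part of the proposed algorithm itself) asks `bert-base-uncased` for the most probable token at a masked position; the prediction shown in the comment is indicative only and depends on the exact model weights.

```python
from transformers import pipeline

# Ask bert-base-uncased to fill a single [MASK] position.
fill_mask = pipeline("fill-mask", model="bert-base-uncased")

predictions = fill_mask("Quantum computing is a rapidly [MASK] field.")
print(predictions[0]["token_str"])  # top-1 token, e.g. "growing" or "evolving"
```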
Note that a different variant of BERT can be trained on a different language dataset and will therefore generate different results; as such, the unique identity to consider here is the BERT model itself, i.e., if users want a unique watermark, they need to train or develop the BERT model on their own. This paper is not concerned with the type of BERT model and is focused on its conceptual application for watermarking. Thus, for us, BERT is a black-box model that returns the most probable word given the context, with the only condition being that it has a constant temperature, i.e., it doesn't hallucinate (produce different results for the same input). For our purposes, you can think of the proposed algorithm as a many-to-one function that maps input texts onto a smaller set of watermarked texts.

## Algorithm
**Watermark Encoding**
:::{figure} Algorithm-Encoding.png
:label: fig:1
Encoding algorithm to watermark input text
:::

The above is a simple implementation of the algorithm where we are assuming
1. The only white spaces in the text are " ".
2. The BERT model has infinite context.

This simplified code allows us to grasp the core of the algorithm. We first split the input text into a list of words, in this case with a simple split() call, thanks to our assumption that the only white-space characters are plain spaces. We then replace every 5th word with the [MASK] token, a special token that tells BERT at which position to predict a word (its exact form can vary from model to model). Next, for every [MASK] token in the list, we pass all the words preceding it and the 4 words following it to the model; here we assume that the BERT model has an infinite context size, which isn't true in practice, so in that case we pass up to maximum_context_size - 5 words along with the [MASK] token. missing_word_form_BERT() then returns the most probable missing word, which replaces the respective [MASK] token in the list. This continues until all [MASK] tokens are replaced, and finally we call " ".join() to convert the list of words back into a string.

The beauty of the algorithm is that if we run it again on the watermarked text, the output is identical to the input. Thus, to check whether a given text is watermarked, we simply run the algorithm again and compare its input and output. A few changes are needed, however: we have to take an offset into consideration, because someone plagiarizing the text might insert additional words that shift the watermark pattern out of alignment.

**Watermark Detection**

The algorithm checks if a given text is watermarked by comparing the input and output texts, considering possible word insertions that may offset the watermark pattern.

1. Input Text Preparation: Obtain the suspected watermarked text as input.

2. Run Watermark Detection Algorithm: Run the watermark detection algorithm on the input text.

3. Compare Input and Output: If the input matches the output, the text is watermarked. If not, proceed to check with offsets.

4. Offset Consideration: Initialize an array to store match percentages for each offset: `offsets = [0, 1, 2, 3, 4]`. If `n` words have been added, the watermark pattern re-aligns at offset `n % 5` (a small sanity check of this re-alignment is sketched after this list).

5. Check for Matches: For each offset, count the matches where the watermark pattern (every 5th word replaced) aligns.

6. Store Match Percentages: Calculate the percentage of matches for each offset and store them.

7. Statistical Analysis: Compute the highest percentage of matches (`Highest Ratio`). Compute the average percentage of matches for the remaining offsets (`Average Others`). Calculate the T-Statistic and P-Value to determine the statistical difference between `Highest Ratio` and `Average Others`.

8. Classification: Use a pre-trained model to classify the text, based on the metrics (`Highest Ratio`, `Average Others`, T-Statistic, P-Value), as watermarked or not.
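The offset arithmetic in step 4 can be sanity-checked with a small toy sketch of ours that is independent of BERT: if the watermarked slots are the 0-based indices 4, 9, 14, ... and `n` words are inserted at the start of the text, the shifted slots line up again exactly at offset `n % 5`.

```python
# Watermarked slots before tampering: every 5th word (0-based indices 4, 9, 14, ...).
marked_slots = [4, 9, 14, 19, 24]

for n_added in range(8):  # number of words inserted at the start of the text
    shifted = [s + n_added for s in marked_slots]
    offset = n_added % 5
    aligned = all((s + 1 - offset) % 5 == 0 for s in shifted)
    print(f"{n_added} inserted words -> offset {offset}, pattern aligned: {aligned}")
```

Every line printed reports `aligned: True`, which is why the detector only needs to try the five offsets 0-4.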
## Implementation - Encoding module
Let's take a look at a potential Python implementation of the proposed watermarking model. The "watermark_text" module identifies every 5th word in the given input string, splits the string using Python's built-in split() method, and tracks these positions as the words to be modified using BERT. These word placeholders are replaced with a "[MASK]" token.
While we are using the BERT model here, the module can easily be adapted to other AI models. The choice of BERT is solely due to its efficiency in altering individual words.

```python
import os
os.environ['HUGGINGFACEHUB_API_TOKEN'] = ''
from transformers import pipeline, AutoTokenizer, AutoModelForMaskedLM
import torch

def watermark_text(text, model_name="bert-base-uncased", offset=0):
    # Clean and split the input text
    text = " ".join(text.split())
    words = text.split()

    # Replace every fifth word with [MASK], starting from the offset
    for i in range(offset, len(words)):
        if (i + 1 - offset) % 5 == 0:
            words[i] = '[MASK]'

    # Initialize the tokenizer and model, move to GPU if available
    device = 0 if torch.cuda.is_available() else -1
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForMaskedLM.from_pretrained(model_name).to("cuda:0" if device == 0 else "cpu")

    # Initialize the fill-mask pipeline
    classifier = pipeline("fill-mask", model=model, tokenizer=tokenizer, device=device)

    # Make a copy of the words list to modify it
    watermarked_words = words.copy()

    # Process the text in chunks
    for i in range(offset, len(words), 5):
        chunk = " ".join(watermarked_words[:i+9])
        if '[MASK]' in chunk:
            try:
                tempd = classifier(chunk)
            except Exception as e:
                print(f"Error processing chunk '{chunk}': {e}")
                continue

            if tempd:
                templ = tempd[0]
                temps = templ['token_str']
                watermarked_words[i+4] = temps.split()[0]
                # print("Done ", i + 1, "th word")

    return " ".join(watermarked_words)

# Example usage
text = "Quantum computing is a rapidly evolving field that leverages the principles of quantum mechanics to perform computations that are infeasible for classical computers. Unlike classical computers, which use bits as the fundamental unit of information, quantum computers use quantum bits or qubits. Qubits can exist in multiple states simultaneously due to the principles of superposition and entanglement, providing a significant advantage in solving complex computational problems."
watermark_text(text, offset=0)
result = "Quantum computing is a rapidly evolving field that leverages the principles of quantum mechanics to perform computations that are impossible for classical computers. Unlike quantum computers, which use bits as the fundamental unit of , quantum computers use quantum bits or qubits. Qubits can exist in multiple states simultaneously according to the principles of symmetry and entanglement, providing a significant advantage in solving complex mathematical problems."
```
In the result, the module has replaced each 5th word with the most probable replacement word selected by BERT. There will always be some words that the AI doesn't alter, for example the 10th word "the" and the 15th word "to": these cannot be changed without altering the entire sentence, so BERT predicts the original word back.
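To see which of the 5th-word slots BERT actually altered, a small helper of our own (not part of the encoding module above) can compare the original and watermarked strings position by position; it assumes both texts still contain the same number of whitespace-separated tokens.

```python
def changed_slots(original, watermarked, step=5, offset=0):
    """Return (1-based position, original word, replacement) for every
    step-th word that differs between the two texts."""
    orig_words, wm_words = original.split(), watermarked.split()
    changed = []
    for i in range(offset + step - 1, min(len(orig_words), len(wm_words)), step):
        if orig_words[i] != wm_words[i]:
            changed.append((i + 1, orig_words[i], wm_words[i]))
    return changed

# With the example above, this reports slots such as
# (20, 'infeasible', 'impossible') and (25, 'classical', 'quantum'),
# while slots like the 10th ("the") and 15th ("to") remain unchanged.
```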
Further, to speed up the AI computation, we can employ GPUs in this module as well as in the Detection module.

## Implementation - Detection Module
Now that we know what our watermarked text will look like, we will henceforth assume that this is the published text that a plagiarizer has access to, and that we are tasked with identifying whether there is copyright infringement.

For this, we create a module that checks how many word matches we get if the AI model, with the same offset parameter, is run on the watermarked text again. The elegance of the algorithm lies in its consistency: if we run it again on a watermarked text, the output will match the input, as the most probable words are already present at every ith offset of the text and their positions have not been altered. Consequently, we would get a 100% match rate, i.e., a match ratio of 1. If all the ith words were altered, we would get 0 matches, i.e., a match ratio of 0.

The use of AI models to improve one's written text has become quite common. When an AI model is given a piece of text, it is likely to alter and/or shuffle an entire sentence based on context, as opposed to individual words. This means our model needs to look for the watermark not only at the ith index but also at the words leading up to it. Our module therefore loops over the offsets 0 to 4 and checks for matches. This also covers the scenario where a plagiarizer might insert extra words, causing the input not to match the output exactly.

Here's how the offset works:
- If 1 word is added at the start, the offset is 1.
- If 2 words are added, the offset is 2.
- If 3 words are added, the offset is 3.
- If 4 words are added, the offset is 4.
- If 5 words are added, the offset is 0 (since the algorithm replaces every 5th word).

In general, if 'n' words are added, the offset is `n % 5`. Since we don't know how many words were added, we need to check all possible offsets (0, 1, 2, 3, 4).

If words are added in the middle of the text, the majority of the watermark pattern (every 5th word replaced) will still be detectable at some offset. The idea is that one offset will show a higher number of matches compared to the others, indicating a watermark.

For detection, we store the percentage of matches for each offset. There is no fixed threshold for determining a watermark, as the choice of words affects the number of matches. For non-watermarked text, the percentage of matches at each offset will be similar. For watermarked text, one offset will have a significantly higher percentage of matches.

The output of the "watermark_text_and_calculate_matches" module is a match ratio for each of the offsets 0-4, which acts as the seed for the next stage of detection. Below is the Python code for generating the list of match ratios.
+ +```python +def watermark_text_and_calculate_matches(text, model_name="bert-base-uncased", max_offset=5): + # Clean and split the input text + text = " ".join(text.split()) + words = text.split() + + # Initialize the tokenizer and model, move to GPU if available + device = 0 if torch.cuda.is_available() else -1 + tokenizer = AutoTokenizer.from_pretrained(model_name) + model = AutoModelForMaskedLM.from_pretrained(model_name).to(device) + + # Initialize the fill-mask pipeline + classifier = pipeline("fill-mask", model=model, tokenizer=tokenizer, device=device) + + # Dictionary to store match ratios for each offset + match_ratios = {} + + # Loop over each offset + for offset in range(max_offset): + # Replace every fifth word with [MASK], starting from the offset + modified_words = words.copy() + for i in range(offset, len(modified_words)): + if (i + 1 - offset) % 5 == 0: + modified_words[i] = '[MASK]' + + # Make a copy of the modified words list to work on + watermarked_words = modified_words.copy() + total_replacements = 0 + total_matches = 0 + + # Process the text in chunks + for i in range(offset, len(modified_words), 5): + chunk = " ".join(watermarked_words[:i+9]) + if '[MASK]' in chunk: + try: + tempd = classifier(chunk) + except Exception as e: + print(f"Error processing chunk '{chunk}': {e}") + continue + + if tempd: + templ = tempd[0] + temps = templ['token_str'] + original_word = words[i+4] + replaced_word = temps.split()[0] + watermarked_words[i+4] = replaced_word + + # Increment total replacements and matches + total_replacements += 1 + if replaced_word == original_word: + total_matches += 1 + + # Calculate the match ratio for the current offset + if total_replacements > 0: + match_ratio = total_matches / total_replacements + else: + match_ratio = 0 + + match_ratios[offset] = match_ratio + + # Return the match ratios for each offset + return match_ratios + +# Example usage +text = "Quantum computing is a rapidly evolving field that leverages the principles of quantum mechanics to perform computations that are infeasible for classical computers. Unlike classical computers, which use bits as the fundamental unit of information, quantum computers use quantum bits or qubits. Qubits can exist in multiple states simultaneously due to the principles of superposition and entanglement, providing a significant advantage in solving complex computational problems." + +# Calculate match ratios +match_ratios = watermark_text_and_calculate_matches(text, max_offset=5) +result = {0: 0.5384615384615384, 1: 0.6153846153846154, 2: 0.5833333333333334, 3: 0.6666666666666666, 4: 0.5833333333333334} +``` + +The final stage of detection involves determining if the match ratios are statistically significant. +To determine whether the text is watermarked, we rely on a binary classification of whether a text is watermarked. For this, we use a pre-trained model based on metrics including Highest Ratio, Average Others, T-Statistic, and P-Value. This approach is necessary because, as illustrated in the graphs later, there is no discernible or easily observable difference between the T-statistics and P-values of watermarked and non-watermarked texts. Consequently, we resort to using a pre-trained model for classification, which has achieved the highest accuracy of 94%. + +The module "check_significant_difference" generates a list of significance. 
+ +```python +from scipy.stats import ttest_1samp +import numpy as np + +def check_significant_difference(match_ratios): + # Extract ratios into a list + ratios = list(match_ratios.values()) + + # Find the highest ratio + highest_ratio = max(ratios) + + # Find the average of the other ratios + other_ratios = [r for r in ratios if r != highest_ratio] + average_other_ratios = np.mean(other_ratios) + + # Perform a t-test to compare the highest ratio to the average of the others + t_stat, p_value = ttest_1samp(other_ratios, highest_ratio) + + # Print the results + print(f"Highest Match Ratio: {highest_ratio}") + print(f"Average of Other Ratios: {average_other_ratios}") + print(f"T-Statistic: {t_stat}") + print(f"P-Value: {p_value}") + + # Determine if the difference is statistically significant (e.g., at the 0.05 significance level) + if p_value < 0.05: + print("The highest ratio is significantly different from the others.") + else: + print("The highest ratio is not significantly different from the others.") + + return [highest_ratio, average_other_ratios, t_stat, p_value] + +# Example usage +text = "Quantum computing is a rapidly evolving field that leverages the principles of quantum mechanics to perform computations that are infeasible for classical computers. Unlike classical computers, which use bits as the fundamental unit of information, quantum computers use quantum bits or qubits. Qubits can exist in multiple states simultaneously due to the principles of superposition and entanglement, providing a significant advantage in solving complex computational problems." +# match_ratios = watermark_text_and_calculate_matches(text, max_offset=5) +# check_significant_difference(match_ratios) + +``` +The module "randomly_add_words" was created to simulate the scenario where additional words have been added to the watermarked test for testing purposes. + +```python +import random + +def randomly_add_words(text, words_to_add, num_words_to_add): + # Clean and split the input text + text = " ".join(text.split()) + words = text.split() + + # Insert words randomly into the text + for _ in range(num_words_to_add): + # Choose a random position to insert the word + position = random.randint(0, len(words)) + # Choose a random word to insert + word_to_insert = random.choice(words_to_add) + # Insert the word at the random position + words.insert(position, word_to_insert) + + # Join the list back into a string and return the modified text + modified_text = " ".join(words) + return modified_text + +# Example usage +text = "Quantum computing is a rapidly evolving field that leverages the principles of quantum mechanics to perform computations that are infeasible for classical computers. Unlike classical computers, which use bits as the fundamental unit of information, quantum computers use quantum bits or qubits. Qubits can exist in multiple states simultaneously due to the principles of superposition and entanglement, providing a significant advantage in solving complex computational problems." +words_to_add = ["example", "test", "random", "insert"] +num_words_to_add = 5 + +# modified_text = randomly_add_words(text, words_to_add, num_words_to_add) +modified_text = randomly_add_words(watermark_text(text, offset=0), words_to_add, num_words_to_add) +(result) modified_text = "Quantum computing is example a rapidly evolving field that leverages the principles of quantum mechanics to perform random computations that are impossible for classical computers. 
Unlike quantum computers, which use bits as the random insert fundamental unit of , quantum computers use quantum bits or qubits. Qubits can exist in multiple states simultaneously according random to the principles of symmetry and entanglement, providing a significant advantage in solving complex mathematical problems." + +match_ratios = watermark_text_and_calculate_matches(modified_text, max_offset=5) +(result) match_ratios = {0: 0.5714285714285714, 1: 0.5714285714285714, 2: 0.5384615384615384, 3: 0.38461538461538464, 4: 0.7692307692307693} + +check_significant_difference(match_ratios) +(result) + Highest Match Ratio: 0.7692307692307693 + Average of Other Ratios: 0.5164835164835164 + T-Statistic: -5.66220858504931 + P-Value: 0.010908789440745323 +The highest ratio is significantly different from the others. +``` + +Once the list of significance is defined, to show the significance of using a pre-trained model, lets plot them to futher understand the statistical summary. Here is the python code used to generate the plots. + +```python +import pandas as pd +import seaborn as sns +import matplotlib.pyplot as plt +from scipy.stats import ttest_ind +from sklearn.model_selection import train_test_split +from sklearn.ensemble import RandomForestClassifier +from sklearn.metrics import classification_report, confusion_matrix + +# Assuming list_of_significance and list_of_significance_watermarked are already defined +# Create DataFrames from the lists +df_significance = pd.DataFrame(list_of_significance, columns=['Highest Ratio', 'Average Others', 'T-Statistic', 'P-Value']) +df_significance_watermarked = pd.DataFrame(list_of_significance_watermarked, columns=['Highest Ratio', 'Average Others', 'T-Statistic', 'P-Value']) + +# Add a label column to distinguish between the two sets +df_significance['Label'] = 'Original' +df_significance_watermarked['Label'] = 'Watermarked' + +# Combine the DataFrames +combined_df = pd.concat([df_significance, df_significance_watermarked], ignore_index=True) + +# Perform EDA +def perform_eda(df): + # Display the first few rows of the DataFrame + print("First few rows of the DataFrame:") + print(df.head()) + + # Display statistical summary + print("\nStatistical Summary:") + print(df.describe()) + + # Check for missing values + print("\nMissing Values:") + print(df.isnull().sum()) + + # Visualize the distributions of the features + plt.figure(figsize=(12, 8)) + sns.histplot(data=df, x='Highest Ratio', hue='Label', element='step', kde=True) + plt.title('Distribution of Highest Ratio') + plt.show() + + plt.figure(figsize=(12, 8)) + sns.histplot(data=df, x='Average Others', hue='Label', element='step', kde=True) + plt.title('Distribution of Average Others') + plt.show() + + plt.figure(figsize=(12, 8)) + sns.histplot(data=df, x='T-Statistic', hue='Label', element='step', kde=True) + plt.title('Distribution of T-Statistic') + plt.show() + + plt.figure(figsize=(12, 8)) + sns.histplot(data=df, x='P-Value', hue='Label', element='step', kde=True) + plt.title('Distribution of P-Value') + plt.show() + + # Pairplot to see relationships + sns.pairplot(df, hue='Label') + plt.show() + + # Correlation matrix + plt.figure(figsize=(10, 8)) + sns.heatmap(df.drop(columns=['Label']).corr(), annot=True, cmap='coolwarm') + plt.title('Correlation Matrix') + plt.show() + + # T-test to check for significant differences + original = df[df['Label'] == 'Original'] + watermarked = df[df['Label'] == 'Watermarked'] + + for column in ['Highest Ratio', 'Average Others', 'T-Statistic', 'P-Value']: + 
t_stat, p_value = ttest_ind(original[column], watermarked[column]) + print(f"T-test for {column}: T-Statistic = {t_stat}, P-Value = {p_value}") + +# Perform EDA on the combined DataFrame +perform_eda(combined_df) + +# Check if the data is ready for machine learning classification + +# Prepare the data +X = combined_df.drop(columns=['Label']) +y = combined_df['Label'] + +# Convert labels to numerical values for ML model +y = y.map({'Original': 0, 'Watermarked': 1}) + +# Split the data into training and testing sets +X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) + +# Train a RandomForestClassifier +clf = RandomForestClassifier(random_state=42) +clf.fit(X_train, y_train) + +# Make predictions +y_pred = clf.predict(X_test) + +# Evaluate the model +print("\nClassification Report:") +print(classification_report(y_test, y_pred)) + +print("\nConfusion Matrix:") +print(confusion_matrix(y_test, y_pred)) + +# Feature importances +feature_importances = clf.feature_importances_ + +# Create a DataFrame for feature importances +feature_importances_df = pd.DataFrame({ + 'Feature': X.columns, + 'Importance': feature_importances +}).sort_values(by='Importance', ascending=False) + +# Plot feature importances +plt.figure(figsize=(12, 8)) +sns.barplot(x='Importance', y='Feature', data=feature_importances_df, palette='viridis') +plt.title('Feature Importances') +plt.show() + +# Heatmap for feature importances +plt.figure(figsize=(10, 8)) +sns.heatmap(feature_importances_df.set_index('Feature').T, annot=True, cmap='viridis') +plt.title('Heatmap of Feature Importances') +plt.show() +``` + +The plots are created using the result from our previous example with check_significant_difference returned: + Highest Match Ratio: 0.7692307692307693 + Average of Other Ratios: 0.5164835164835164 + T-Statistic: -5.66220858504931 + P-Value: 0.010908789440745323 + +```{list-table} First few rows of the DataFrame +:label: tbl:Dataframe +:header-rows: 1 +* - + - Highest Ratio + - Average Others + - T-Statistic + - P-Value + - Label +* - 0 + - 0.233333 + - 0.182203 + - -3.532758 + - 0.038563 + - Original +* - 1 + - 0.203390 + - 0.139195 + - -3.440591 + - 0.041218 + - Original +* - 2 + - 0.338983 + - 0.270339 + - -2.228608 + - 0.112142 + - Original +* - 3 + - 0.254237 + - 0.168362 + - -2.451613 + - 0.246559 + - Original +* - 4 + - 0.288136 + - 0.210876 + - -5.467540 + - 0.012026 + - Original +``` +```{list-table} Statistical Summary +:label: tbl:Statistical_Summary +:header-rows: 1 +* - + - Highest Ratio + - Average Others + - T-Statistic + - P-Value +* - count + - 4000.000000 + - 4000.000000 + - 3999.000000 + - 3999.000000 +* - mean + - 0.490285 + - 0.339968 + - -6.076672 + - 0.036783 +* - std + - 0.128376 + - 0.082900 + - 5.580957 + - 0.043217 +* - min + - 0.101695 + - 0.066667 + - -111.524590 + - 0.000002 +* - 25% + - 0.416667 + - 0.296610 + - -6.938964 + - 0.006418 +* - 50% + - 0.491525 + - 0.354732 + - -4.431515 + - 0.021973 +* - 75% + - 0.573770 + - 0.398224 + - -3.176861 + - 0.052069 +* - max + - 0.868852 + - 0.580601 + - -1.166065 + - 0.451288 +``` + +Missing Values: +Highest Ratio 0 +Average Others 0 +T-Statistic 1 +P-Value 1 +Label 0 +dtype: int64 + +:::{figure} Distribution_of_highest_ratio.png +:label: fig:2 +Distribution of highest ratio +::: + +:::{figure} Distribution_of_average_others.png +:label: fig:3 +Distribution of average others +::: + +:::{figure} Distribution_of_t-statistics.png +:label: fig:4 +Distribution of t-statistics +::: + +:::{figure} 
Distribution_of_P-value.png
+:label: fig:5
+Distribution of P-value
+:::
+
+:::{figure} Dataset.png
+:label: fig:6
+Dataset
+:::
+
+:::{figure} Correlation_Matrix.png
+:label: fig:7
+Correlation Matrix
+:::
+
+From the graphs and statistical summaries, several inferences can be drawn regarding the distributions and relationships between the variables in the dataset:
+
+**Distribution of Highest Ratio:**
+The distribution of the "Highest Ratio" variable shows a clear distinction between the "Original" and "Watermarked" categories.
+The "Original" category has a peak around 0.4, while the "Watermarked" category peaks around 0.5, indicating a shift in the distribution towards higher values for the watermarked data.
+
+**Distribution of Average Others:**
+Similarly, the "Average Others" variable shows a distinction between the two categories.
+The "Original" category peaks around 0.3, whereas the "Watermarked" category peaks slightly higher, around 0.4.
+This suggests that the average values for other ratios are higher in the watermarked data compared to the original data.
+
+**Distribution of T-Statistic:**
+The distribution of the T-statistic is highly skewed to the left for both categories, with a long tail extending to very negative values.
+The "Original" category appears to have a more pronounced peak near 0, while the "Watermarked" category has a lower count at the peak and a wider spread.
+
+**Distribution of P-Value:**
+The P-value distribution is heavily skewed towards 0 for both categories, with the "Watermarked" category showing a sharper peak at 0.
+This suggests that most of the tests result in very low p-values, indicating strong statistical significance in the differences observed.
+
+**Pair Plot:**
+The pair plot provides a visual comparison of the relationships between the variables for the two categories.
+There are clear clusters and separations between the "Original" and "Watermarked" categories in the scatter plots, particularly for "Highest Ratio" vs. "Average Others" and "Highest Ratio" vs. "P-Value".
+This reinforces the idea that the watermarked data exhibits different characteristics compared to the original data.
+
+**Correlation Matrix:**
+The correlation matrix shows the pairwise correlation coefficients between the variables.
+"Highest Ratio" and "Average Others" are positively correlated (0.66), indicating that higher values of the highest ratio tend to be associated with higher average values of other ratios.
+"T-Statistic" has a negative correlation with "Highest Ratio" (-0.35) and "P-Value" (-0.31), suggesting that higher ratios tend to result in more negative T-statistics and lower p-values.
+
+**Overall Observations:**
+The "Watermarked" data tends to have higher ratios and averages compared to the "Original" data.
+The T-statistics and p-values indicate strong statistical differences between the original and watermarked categories.
+The pair plot and correlation matrix provide further evidence of distinct patterns and relationships in the watermarked data compared to the original data.
+
+While these plots do show a difference between the watermarked and non-watermarked text, using a pre-trained model helps us achieve higher efficiency and consistency in our comparisons.
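+
+For reference, the T-Statistic and P-Value columns analyzed above come from the one-sample t-test (`scipy.stats.ttest_1samp`) inside `check_significant_difference`. For the $n$ "other" match ratios with sample mean $\bar{x}$ and sample standard deviation $s$, compared against the highest ratio $r_{\max}$, it computes
+
+$$
+t = \frac{\bar{x} - r_{\max}}{s / \sqrt{n}}
+$$
+
+and reports a two-sided p-value from the Student's $t$ distribution with $n - 1$ degrees of freedom. Because the highest ratio always exceeds the mean of the remaining ratios, the resulting t-statistics are negative, which is consistent with the left-skewed T-Statistic distribution and the summary table above.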
+
+## Model Training, Testing and Efficiency
+The algorithm in this paper was trained using the top 10 Project Gutenberg books [@book01], [@book02], [@book03], [@book04], [@book05], [@book06], [@book07], [@book08], [@book09], [@book10] as a dataset and tested with 2000 test cases. The results have been captured in Results.csv.
+With this data, the model is able to identify watermarked text with an accuracy of 94%.
+
+**Code used for model training**
+```python
+import pandas as pd
+import seaborn as sns
+import matplotlib.pyplot as plt
+from sklearn.model_selection import train_test_split
+from sklearn.linear_model import LogisticRegression
+from sklearn.tree import DecisionTreeClassifier
+from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
+from sklearn.svm import SVC
+from sklearn.naive_bayes import GaussianNB
+from sklearn.neighbors import KNeighborsClassifier
+from sklearn.metrics import classification_report, confusion_matrix
+
+# Assuming list_of_significance and list_of_significance_watermarked are already defined
+# Create DataFrames from the lists
+df_significance = pd.DataFrame(list_of_significance, columns=['Highest Ratio', 'Average Others', 'T-Statistic', 'P-Value'])
+df_significance_watermarked = pd.DataFrame(list_of_significance_watermarked, columns=['Highest Ratio', 'Average Others', 'T-Statistic', 'P-Value'])
+
+# Add a label column to distinguish between the two sets
+df_significance['Label'] = 'Original'
+df_significance_watermarked['Label'] = 'Watermarked'
+
+# Combine the DataFrames
+combined_df = pd.concat([df_significance, df_significance_watermarked], ignore_index=True)
+combined_df = combined_df.dropna()
+
+# Prepare the data
+X = combined_df.drop(columns=['Label'])
+y = combined_df['Label']
+
+# Convert labels to numerical values for ML model
+y = y.map({'Original': 0, 'Watermarked': 1})
+
+# Split the data into training and testing sets
+X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
+
+# Initialize models
+models = {
+    'Logistic Regression': LogisticRegression(random_state=42, max_iter=1000),
+    'Decision Tree': DecisionTreeClassifier(random_state=42),
+    'Random Forest': RandomForestClassifier(random_state=42),
+    'Support Vector Machine': SVC(random_state=42),
+    'Gradient Boosting': GradientBoostingClassifier(random_state=42),
+    'AdaBoost': AdaBoostClassifier(random_state=42),
+    'Naive Bayes': GaussianNB(),
+    'K-Nearest Neighbors': KNeighborsClassifier()
+}
+
+# Train and evaluate models
+for model_name, model in models.items():
+    model.fit(X_train, y_train)
+    y_pred = model.predict(X_test)
+    print(f"\n{model_name} Classification Report:")
+    print(classification_report(y_test, y_pred))
+    print(f"\n{model_name} Confusion Matrix:")
+    print(confusion_matrix(y_test, y_pred))
+
+    # Feature importances (only for models that provide it)
+    if hasattr(model, 'feature_importances_'):
+        feature_importances = model.feature_importances_
+        feature_importances_df = pd.DataFrame({
+            'Feature': X.columns,
+            'Importance': feature_importances
+        }).sort_values(by='Importance', ascending=False)
+
+        # Plot feature importances
+        # plt.figure(figsize=(12, 8))
+        # sns.barplot(x='Importance', y='Feature', data=feature_importances_df, palette='viridis')
+        # plt.title(f'{model_name} Feature Importances')
+        # plt.show()
+```
+
+**Code for model testing**
+```python
+import os
+import random
+
+def extract_test_cases(folder_path, num_cases=2000, words_per_case=300):
+    test_cases = []
+    book_files = [f for f in 
os.listdir(folder_path) if os.path.isfile(os.path.join(folder_path, f))] + + # Calculate the number of test cases to extract from each book + cases_per_book = num_cases // len(book_files) + extra_cases = num_cases % len(book_files) + + for book_file in book_files: + with open(os.path.join(folder_path, book_file), 'r', encoding='utf-8') as file: + text = file.read() + words = text.split() + num_words = len(words) + + # Ensure enough words are available to extract the cases + if num_words < words_per_case: + continue + + # Determine the number of cases to extract from this book + num_cases_from_book = cases_per_book + if extra_cases > 0: + num_cases_from_book += 1 + extra_cases -= 1 + + for _ in range(num_cases_from_book): + start_index = random.randint(0, num_words - words_per_case) + case = ' '.join(words[start_index:start_index + words_per_case]) + test_cases.append(case) + + if len(test_cases) == num_cases: + return test_cases + + return test_cases + +# Usage example +folder_path = 'books' +test_cases = extract_test_cases(folder_path) +``` +```python +list_of_significance = [] +list_of_significance_watermarked = [] +count_t = 0 +for text in test_cases: + count_t+=1 + print("___________________________________________________________________________________________________________________________") + print("Doing", count_t) + print("___________________________________________________________________________________________________________________________") + + words_to_add = ["example", "test", "random", "insert"] + num_words_to_add = 5 + + modified_text = randomly_add_words(watermark_text(text, offset=0), words_to_add, num_words_to_add) + + match_ratios = watermark_text_and_calculate_matches(modified_text, max_offset=5) + list_of_significance_watermarked.append(check_significant_difference(match_ratios)) + + match_ratios = watermark_text_and_calculate_matches(text, max_offset=5) + list_of_significance.append(check_significant_difference(match_ratios)) + + print("___________________________________________________________________________________________________________________________") + print("Done", count_t, ) + print("___________________________________________________________________________________________________________________________") +``` + +## Analysis of the Algorithm + +**Strengths:** +1. Robustness against attacks: The BERT-based watermarking algorithm uses the sophisticated context-understanding capability of BERT to embed watermarks. This makes the watermark integration deeply intertwined with the text's semantic structure, which is difficult to detect and remove without altering the underlying meaning, thus providing robustness against simple text manipulation attacks. +2. Comparison with existing methods: Compared to traditional watermarking methods like word context and UniSpaCh, the BERT-based approach offers a more adaptable and less detectable method. It does not rely on altering visible text elements or patterns easily erased, like white spaces or specific word sequences. Instead, it uses semantic embedding, making it superior in maintaining the natural flow and readability of the text. +3. Scalability and adaptability: The method is scalable to different languages and text forms by adjusting the BERT model used. It can be adapted to work with different BERT variants trained on specific datasets, enhancing flexibility in deployment. + +**Challenges:** +1. 
Dependency on model consistency: The watermark detection relies heavily on the consistency of the BERT model's output. Any updates or changes in the model could potentially alter the watermark, making it undetectable. If the watermark could embed some form of version history and version control, this risk could be managed.
+2. Data integrity is highly dependent on the model: The integrity of the watermarked text depends on how well the model replaces the given word. Because AI-generated text is produced token by token, with each new token conditioned on all previous ones, BERT watermarking can preserve integrity quite effectively. However, if it is asked to watermark text that is completely different from its training dataset, it may return incoherent output; for example, a BERT model trained only on scientific papers will struggle immensely when trying to watermark fairy tales.
+3. Potential for false positives/negatives: Given the probabilistic nature of BERT's predictions, there's a risk of incorrect watermark detection, especially in texts with complex semantics or those that closely mimic the watermark patterns without actually being watermarked.
+4. Potential loss of context: When words are replaced, the intended context of delivery could be altered. However, AI models are continually improving, and we hope that a well-trained model can significantly mitigate this risk.
+
+**Real-world applicability:**
+1. Versatility in applications: This method can be applied across various fields such as copyright protection and content authentication, and in legal and academic settings where proof of authorship is crucial. It's particularly beneficial for managing copyrights in digital media, academic papers, and any online content where text is dynamically generated or reused.
+2. Integration with existing systems: The algorithm can be seamlessly integrated with current content management systems (CMS) and digital rights management (DRM) systems, enhancing their capabilities to include advanced text watermarking features. This integration helps organizations maintain control over their content distribution and monitor usage without invasive methods; a minimal end-to-end detection sketch is given after this list.
+3. Application in AI-generated text: With the proliferation of AI-generated content from models like ChatGPT, GPT-4, and other AI writing assistants, distinguishing between human-generated and AI-generated text becomes crucial. The BERT-based watermarking can be used to embed unique, non-intrusive identifiers into AI-generated texts, ensuring that each piece of content can be traced back to its source. This is particularly valuable in preventing the spread of misinformation, verifying the authenticity of content, and in applications where copyright claims on AI-generated content might be disputed.
+4. Forensic Linguistics in Cybersecurity: In cybersecurity, determining the origin of phishing emails or malicious texts can be crucial. BERT-based watermarking can assist forensic linguists and security professionals by providing a means to trace the origins of specific texts back to their creators, helping to identify patterns or sources of cyber threats.
+5. Enhanced Licensing Control for Digital Text: As digital content licensing becomes more complex with different rights for different geographies and platforms, watermarking can help content owners and licensing agencies enforce these rights more effectively. The watermark makes it easier to enforce and monitor compliance automatically.
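+
+To make the integration point above concrete, here is a minimal, hypothetical sketch of how the pieces developed in this paper could be chained into a single detection call. It assumes the helper functions defined earlier (`watermark_text_and_calculate_matches` and `check_significant_difference`) are available and that `clf` is one of the classifiers trained in the previous section; the wrapper name `is_watermarked` and the CMS hook in the comment are illustrative, not part of the implementation above.
+
+```python
+import pandas as pd
+
+# Same feature names used when training the classifiers above
+FEATURE_COLUMNS = ['Highest Ratio', 'Average Others', 'T-Statistic', 'P-Value']
+
+def is_watermarked(text, clf, max_offset=5):
+    # Compute the per-offset match ratios for the candidate text
+    match_ratios = watermark_text_and_calculate_matches(text, max_offset=max_offset)
+
+    # Reduce them to the four features used throughout this paper
+    highest, avg_others, t_stat, p_value = check_significant_difference(match_ratios)
+
+    # Package the features with the training column names and classify
+    features = pd.DataFrame([[highest, avg_others, t_stat, p_value]], columns=FEATURE_COLUMNS)
+    return bool(clf.predict(features)[0])  # 1 = watermarked, 0 = original
+
+# Hypothetical use inside a CMS/DRM pipeline:
+# if is_watermarked(submitted_text, clf):
+#     flag_for_review(submitted_text)
+```
+
+Keeping the feature extraction and the classifier decision behind one call like this is the shape most content-management integrations would need.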
+ + +## Conclusion +By leveraging the BERT model and the proposed algorithm, we have achieved a 94% accuracy rate in detecting watermarked text. With an appropriate training dataset and ongoing advancements in AI technology, this approach promises even more robust watermarking techniques. This progress will enhance our ability to identify AI-generated content and provide an effective means for detecting plagiarism. + +[^footnote-3]: $\mathrm{e^{-i\pi}}$ diff --git a/papers/atharva_rasane/00_myst_template/mybib.bib b/papers/atharva_rasane/00_myst_template/mybib.bib new file mode 100644 index 0000000000..2296064be0 --- /dev/null +++ b/papers/atharva_rasane/00_myst_template/mybib.bib @@ -0,0 +1,238 @@ +@book{book01, + author = "William Shakespeare", + year = "1998", + title = "Romeo and Juliet", + address = "USA", + url = {https://www.gutenberg.org/ebooks/1513}, +} + +@book{book02, + author = "Herman Melville", + year = "2001", + title = "Moby Dick; Or, The Whale", + address = "USA", + url = {https://www.gutenberg.org/ebooks/2701}, +} + +@book{book03, + author = "Jane Austen", + year = "1998", + title = "Pride and Prejudice", + address = "USA", + url = {https://www.gutenberg.org/ebooks/1342}, +} + +@book{book04, + author = "Mary Wollstonecraft Shelley", + year = "1993", + title = "Frankenstein; Or, The Modern Prometheus", + address = "USA", + url = {https://www.gutenberg.org/ebooks/84}, +} + +@book{book05, + author = "George Eliot", + year = "1994", + title = "Middlemarch", + address = "USA", + url = {https://www.gutenberg.org/ebooks/145}, +} + +@book{book06, + author = "William Shakespeare", + year = "1994", + title = "The Complete Works of William Shakespeare", + address = "USA", + url = {https://www.gutenberg.org/ebooks/100}, +} + +@book{book07, + author = "E. M. Forster", + year = "2001", + title = "A Room with a View", + address = "USA", + url = {https://www.gutenberg.org/ebooks/2641}, +} + +@book{book08, + author = "Louisa May Alcott", + year = "2011", + title = "Little Women; Or, Meg, Jo, Beth, and Amy", + address = "USA", + url = {https://www.gutenberg.org/ebooks/37106}, +} + +@book{book09, + author = "L. M. 
Montgomery", + year = "2022", + title = "The Blue Castle", + address = "USA", + url = {https://www.gutenberg.org/ebooks/67979}, +} + +@book{book10, + author = "Elizabeth Von Arnim", + year = "2005", + title = "The Enchanted April", + address = "USA", + url = {https://www.gutenberg.org/ebooks/16389}, +} + +@article{Atr01, + author = "Kamaruddin, Nurul Shamimi and Kamsin, Amirrudin and Por, Lip Yee and Rahman, Hameedur", + year = "2018", + title = "A Review of Text Watermarking: Theory, Methods, and Applications", + journal = "IEEE Access", + volume = 6, + issue = 3, + pages = {}, + doi = {10.1109/ACCESS.2018.2796585}} +} + +@article{Atr02, + author = "Lancaster, T", + year = "2023", + title = "Artificial intelligence, text generation tools and ChatGPT - does digital watermarking offer a solution?", + journal = "Int J Educ Integr", + volume = 19, + issue = 10, + pages = {8011-8028}, + doi = {https://doi.org/10.1007/s40979-023-00131-6} +} + +@article{Atr03, + author = "Yichao Wu and Zhengyu Jin and Chenxi Shi and Penghao Liang and Tong Zhan", + year = "2024", + title = "Research on the Application of Deep Learning-based BERT Model in Sentiment Analysis", + journal = "ArXiv", + volume = {abs/2403.08217}, + url = {https://api.semanticscholar.org/CorpusID:268379403} +} + +@article{Atr04, +title = {UniSpaCh: A text-based data hiding method using Unicode space characters}, +journal = {Journal of Systems and Software}, +volume = {85}, +number = {5}, +pages = {1075-1082}, +year = {2012}, +issn = {0164-1212}, +doi = {https://doi.org/10.1016/j.jss.2011.12.023}, +url = {https://www.sciencedirect.com/science/article/pii/S0164121211003177}, +author = {Lip Yee Por and KokSheik Wong and Kok Onn Chee}, +keywords = {UniSpaCh, DASH, Data hiding, Unicode character, Space manipulation}, +abstract = {This paper proposes a text-based data hiding method to insert external information into Microsoft Word document. First, the drawback of low embedding efficiency in the existing text-based data hiding methods is addressed, and a simple attack, DASH, is proposed to reveal the information inserted by the existing text-based data hiding methods. Then, a new data hiding method, UniSpaCh, is proposed to counter DASH. The characteristics of Unicode space characters with respect to embedding efficiency and DASH are analyzed, and the selected Unicode space characters are inserted into inter-sentence, inter-word, end-of-line and inter-paragraph spacings to encode external information while improving embedding efficiency and imperceptivity of the embedded information. UniSpaCh is also reversible where the embedded information can be removed to completely reconstruct the original Microsoft Word document. Experiments were carried out to verify the performance of UniSpaCh as well as comparing it to the existing space-manipulating data hiding methods. Results suggest that UniSpaCh offers higher embedding efficiency while exhibiting higher imperceptivity of white space manipulation when compared to the existing methods considered. 
In the best case scenario, UniSpaCh produces output document of size almost 9 times smaller than that of the existing method.} +} + +@INPROCEEDINGS{Proc01, + author={Jalil, Zunera and Mirza, Anwar M.}, + booktitle={2009 International Conference on Information and Multimedia Technology}, + title={A Review of Digital Watermarking Techniques for Text Documents}, + year={2009}, + volume={}, + number={}, + pages={230-234}, + keywords={Watermarking;Copyright protection;Internet;Cryptography;Steganography;Computer science;Information security;Intellectual property;Data mining;Law;watermarking;copyright protection;information security;text structure}, + doi={10.1109/ICIMT.2009.11}} + +# These references may be helpful: + +@inproceedings{jupyter, + abstract = {It is increasingly necessary for researchers in all fields to write computer code, and in order to reproduce research results, it is important that this code is published. We present Jupyter notebooks, a document format for publishing code, results and explanations in a form that is both readable and executable. We discuss various tools and use cases for notebook documents.}, + author = {Kluyver, Thomas and Ragan-Kelley, Benjamin and Pérez, Fernando and Granger, Brian and Bussonnier, Matthias and Frederic, Jonathan and Kelley, Kyle and Hamrick, Jessica and Grout, Jason and Corlay, Sylvain and Ivanov, Paul and Avila, Damián and Abdalla, Safia and Willing, Carol and {Jupyter development team}}, + editor = {Loizides, Fernando and Scmidt, Birgit}, + location = {Netherlands}, + publisher = {IOS Press}, + url = {https://eprints.soton.ac.uk/403913/}, + booktitle = {Positioning and Power in Academic Publishing: Players, Agents and Agendas}, + year = {2016}, + pages = {87--90}, + title = {Jupyter Notebooks - a publishing format for reproducible computational workflows}, +} + +@article{matplotlib, + abstract = {Matplotlib is a 2D graphics package used for Python for application development, interactive scripting, and publication-quality image generation across user interfaces and operating systems.}, + author = {Hunter, J. D.}, + publisher = {IEEE COMPUTER SOC}, + year = {2007}, + doi = {https://doi.org/10.1109/MCSE.2007.55}, + journal = {Computing in Science \& Engineering}, + number = {3}, + pages = {90--95}, + title = {Matplotlib: A 2D graphics environment}, + volume = {9}, +} + +@article{numpy, + author = {Harris, Charles R. and Millman, K. Jarrod and van der Walt, Stéfan J. and Gommers, Ralf and Virtanen, Pauli and Cournapeau, David and Wieser, Eric and Taylor, Julian and Berg, Sebastian and Smith, Nathaniel J. and Kern, Robert and Picus, Matti and Hoyer, Stephan and van Kerkwijk, Marten H. 
and Brett, Matthew and Haldane, Allan and del Río, Jaime Fernández and Wiebe, Mark and Peterson, Pearu and Gérard-Marchant, Pierre and Sheppard, Kevin and Reddy, Tyler and Weckesser, Warren and Abbasi, Hameer and Gohlke, Christoph and Oliphant, Travis E.}, + publisher = {Springer Science and Business Media {LLC}}, + doi = {https://doi.org/10.1038/s41586-020-2649-2}, + date = {2020-09}, + year = {2020}, + journal = {Nature}, + number = {7825}, + pages = {357--362}, + title = {Array programming with {NumPy}}, + volume = {585}, +} + +@misc{pandas1, + author = {{The Pandas Development Team}}, + title = {pandas-dev/pandas: Pandas}, + month = feb, + year = {2020}, + publisher = {Zenodo}, + version = {latest}, + url = {https://doi.org/10.5281/zenodo.3509134}, +} + +@inproceedings{pandas2, + author = {Wes McKinney}, + title = {{D}ata {S}tructures for {S}tatistical {C}omputing in {P}ython}, + booktitle = {{P}roceedings of the 9th {P}ython in {S}cience {C}onference}, + pages = {56 - 61}, + year = {2010}, + editor = {{S}t\'efan van der {W}alt and {J}arrod {M}illman}, + doi = {https://doi.org/10.25080/Majora-92bf1922-00a}, +} + +@article{scipy, + author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and + Haberland, Matt and Reddy, Tyler and Cournapeau, David and + Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and + Bright, Jonathan and {van der Walt}, St{\'e}fan J. and + Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and + Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and + Kern, Robert and Larson, Eric and Carey, C J and + Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and + {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and + Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and + Harris, Charles R. and Archibald, Anne M. and + Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and + {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, + title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific + Computing in Python}}, + journal = {Nature Methods}, + year = {2020}, + volume = {17}, + pages = {261--272}, + adsurl = {https://rdcu.be/b08Wh}, + doi = {https://doi.org/10.1038/s41592-019-0686-2}, +} + +@article{sklearn1, + author = {Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. 
and Duchesnay, E.}, + year = {2011}, + journal = {Journal of Machine Learning Research}, + pages = {2825--2830}, + title = {Scikit-learn: Machine Learning in {P}ython}, + volume = {12}, +} + +@inproceedings{sklearn2, + author = {Buitinck, Lars and Louppe, Gilles and Blondel, Mathieu and Pedregosa, Fabian and Mueller, Andreas and Grisel, Olivier and Niculae, Vlad and Prettenhofer, Peter and Gramfort, Alexandre and Grobler, Jaques and Layton, Robert and VanderPlas, Jake and Joly, Arnaud and Holt, Brian and Varoquaux, Gaël}, + booktitle = {ECML PKDD Workshop: Languages for Data Mining and Machine Learning}, + year = {2013}, + pages = {108--122}, + title = {{API} design for machine learning software: experiences from the scikit-learn project}, +} diff --git a/papers/atharva_rasane/00_myst_template/myst.yml b/papers/atharva_rasane/00_myst_template/myst.yml new file mode 100644 index 0000000000..6b2e2bbc88 --- /dev/null +++ b/papers/atharva_rasane/00_myst_template/myst.yml @@ -0,0 +1,57 @@ +version: 1 +project: + # Update this to match `scipy-2024-` the folder should be `` + id: scipy-2024-atharva_rasane + # Ensure your title is the same as in your `main.md` + title: AI driven Watermarking Technique for Safeguarding Text Integrity in the Digital Age + subtitle: + # Authors should have affiliations, emails and ORCIDs if available + authors: + - name: Atharva Rasane + email: rratharva@gmail.com + affiliations: + - KLE Technology University + keywords: + - physical watermark + - logical watermark + - HuggingFace Transformer Library + - BERT + # Add the abbreviations that you use in your paper here + abbreviations: + BERT: Bidirectional Encoder Representations from Transformers + AI: Artificial Intelligence + NLP: Natural Language Processing + NSP: Next Sentence Prediction + GPU: Graphics Processing Units + T-statistics: Test Statistics + P-value: Probability Value + DRM: Digital Rights Management + CMS: Content Management Systems + # It is possible to explicitly ignore the `doi-exists` check for certain citation keys + error_rules: + - rule: doi-exists + severity: ignore + keys: + - book01 + - book02 + - book03 + - book04 + - book05 + - book06 + - book07 + - book08 + - book09 + - book10 + - Atr01 + - Atr02 + - Atr03 + # A banner will be generated for you on publication, this is a placeholder + banner: banner.png + # The rest of the information shouldn't be modified + subject: Research Article + open_access: true + license: CC-BY-4.0 + venue: Scipy 2024 + date: 2024-07-10 +site: + template: article-theme

z6M@5D2%XUH(jUb7t;O&qfhlHY0L=z~$9$Bn|NIVK-sFAoZ^l&>lSgAnlL!3VfU{aN zS9%4s3DCEu0o?{6&KW8mG5YcP_^zWa3eT z8Xft3)Oy6U{)sGgc??K3pj=(Triu!VB5NLu_x8fgEnsF`;Lqh$wI4LL`_xIfWJuL& zssq{;O^|+wtOOfLBv5uIDG0dPyMA5%QvRWNU{8WvS}b8_hH-6s{cSUA{_W94U-~Dm zCAE8v)Q_(^l|&K60@PLG7^@KirB0Qn@R0|4&=a2iOU*_apgT}?8Eth8Z{(w$p>M^L zQ`8@nu|B^}e8R&`+yAGw$5E(-?aROg%j@p1uFFl&K=2P|*o?1_!Md_Zpv4gh$Re41CD>eBo{}ouo{2Km3l=y^D5r2fLu|-XVADe$v?2`JsOSpAXSY5iQYH!_Bh; z>bf7!P3Hc(03>&2ySO=|f4JQR-D4Hboabs`Hi|#;i`bQ%iyEyitKM^ayh#ofZbT)% z{`4K%=+64v?6X@4V22HQ5>BAriL^ec(MkG$l8KE+ZTp!R8vF%MTiRp#{_J9wpF|Jq z!b?@XUJpxa_`xT)>W%-%7R=wJ*QyV2%57=C5!Q%705K!Z2hmnD_CXo>75LAdgQPLL zS3S;fW6r73app{Tj7)k0vtSaWg;U9!Gf>B0ckZ&n z>_LtZh@GTw*h`V5D(L_vc|W{_g@f>HN)tTdo+%T>>9Jm7LJ?a>S@d^@;#_JoLCg53 z?40^d?#vqXD6Ub8k>UjtVpcSua-^)k11_ZCrsO{xCn;0O3-at*-=CXnkh9|;S;s&T zb?+T}{-=3d1R#W>{+w^Q{H(N2SlqXb=M~hm>>b~?rH>yfsbqx>H+^)Q-5HX#vy47u zN3QcVH*RW`K>TC8N_~5wgIq@)Y-#?JjPn(Hsah_SEY-eyJ*mS&N z5Ek9zz<_~3@fH`mojC~L#;_^d7di6_S{FXq_i&KS(WJR8r+u1>LBtVIk){UK6&=C# zhNgM|2y&7b>_@0u|6c^ud9l};v-kvPDMQ5*hI1AY(C=XTJp0CFB@6{lD8Qbw;vrpRbxZ z^_In&f@M6T%QCNe$D>QAtKgvFI5;^oP45HXMG7$PV57a{3UCz((`>xlV^0a~`EIcl z9Xw9T=gWato*#&;SRWY?0R{xM5)Ge^G@Ss+#j(+3y0zSssDOYEo|hw3yCHaXak}=M z>VTo`=?i!hs(vgWytXjo{@PWXQV#x7PL|Orc3n5i)XQMbAfUsVR@;j{{d%fI=bN2n z6`))*0N#zO37*bJg4yr`NXBOi27;ZN&irbm#XS2ARM*UYBXFXd+FCB4xLDf=$Uu`R zGsk5e$?|n4zg(o-DS{TP3aK7Ii=z*Iv_ki=o5>b;v%xI0;fJ0DRKrdW%7YS3%+%T_ zFQZ9g)fyyT3h|8c;0|C9-u?M@A#N+$Ffvi1AS;=4_D@5KmhhzKkhx?6^I=)+K)K`e zGgKwhVD&ZH`72?8-0eVYU0f0!q`Z*)V50|<_c98%APhz3R!ld@^e1uWbgReczn`)Ko>)ixL!+3mYt`3a}57e5uSJcT-2B4Kp#ev0j z-G|FhcQ0)2_&QRtO*mDuvGVv{K z#CgW9Zk+=)2Q*9!BQNW7%T6udIiO`xWdLzQ|2q}gR2?*$TOpR&bpVtHLIrr(+6x%^ z-|Tj=3_SAnJa?1dEKV~yXluG(+Pq_lrrX`^ThF^7U|^QknQU8bF}1`ubmD9=)ulu< zgp(0AZjoSQVn4Do&<<1d&dK8;L-z_!5mMK@I3@AD>iomn#+`#wmV!7)1FTe_|nI*fTcCU%lY{ zU?QH=!DilY_`Tl9IlS!ldD=5@_g3{WE#mMsy>VF$qg1o8P_#7)YonbG(Q9GW)+}LH z+?QKXkzxX<%x{!!bBJ|Hngq>cHzldz)GNT>nI-9cz#(4z*qv8BFn^^GZ6r> zOJVoS*w@=jUIzxdvo*@i)>pL=_Klkt>FF=rueY+na z(?l>o2=#ni*ftiF6F#Wo^W1+%S#y+*qb-3gRLzRsYo_hiN-hDNPOA7T8UE8hRv5-% z?#mkiKzOY`#C5oXE;1$-&^?`dU1X&KIpw#UQGc6|x>r-{Q6f0D;N&F!iUTb#J)3#OE)-V4SE1TR69<%rC#Z99of~ z=5&Et_#9`H-lSKa_M!(afA3p^r_Px;Z3jFS{=i+FVm9!y9dcieTIo*gGFF$fK9c6R z02pskB7%%~R7_+`O6b)VIznx3ub9Bw&n}y+#N4g` z>C2t?ot7ZNXO|Jp%)bjVgZ{qD6q(|q&F!4{ZA)D~?R@u}fg4i@D#<{Jhi$tzFdFsc z-27AA9Bk=uS)i)?C{DaR#yh-K?!UisAHCFbp4`_p8YE(r(ON~M-8GDZ54C1h5h&LG z`V7Pn@Pg}x2v;l>ggWlNsJ$=UxH_o5nALX8h-&$|+=$+W<1~4*n&-9jMG};zbjkl7IU)bid!}XuPtVex-_1&HSdk~!Ax2Xv{fYGn)4Z&dWusD`J`Q55>EWYa> z<+Cc@K$&Ik! 
zQ6)s+2Nnd-7?MA5sX~)*N(F-VMlWw5`rsv96YDvp`@Sz`W(L41OESVl?ahbR{coep z*rO@Wv%b3w)8~8#>ORJO{LVE8@^NOMcR8n1srIR_1UHK#lIPQ4UzmxUeNx`XJ zP$$Fs(b$=3b))%&3fPLsae2y_Fru~;iU)RV<+pV zNG5Dy$TJT_j`mOJMgVVyI|IrF4vb6(`%iU2j-m*5+B8!e#d7JF1 zc9=IdazL|ik!n;=9ymP4(Fs<5J8Oh<4C`Dvm6#UBjvjfUi8hsmL9Pv(Tb2fOCcy}& zzQ8d|K|vu7XuR^{2>Nz>!ex`n$jATzhK`hm2cP}!1D`Hp{n&C{kN!vp1iBlRIr=5q zd`5vqvBUITaHyQ6Z_;A$JpD{!VO+;~AVrR6KhVI+G1?(N;M6nBSn~3_OEk4pye0z7 zfR6J$ADzYSg6!aue2S?e^|I}o=guz&1S{EL%Nd?{%ezRu>lCTKN_T^zL~m~fl&4s$ zu{}|o!C{@%-F<`!sRKex;^lYjDSCEE%R++P9HE0(9Ud3_3mn(AP&nli?3`@u6=#yz zt=;l>4Yge3)A^<HBwAmPAqWj84S9ihs&U9KSg*40tvtiEOc}R!@-vW?0n(( zkYpowa-pc1Lip45#8qc0cF(1v1DIp?&*VL@Jz0i+OgcZb0|4CheZFYTZ(mRE^0avacUz9)Lcr&77>?*WacRyGScQ^;B0b=n8d9kjm$)_HpGXM>hs>6% z3(!F_mNW{kT7NABT{LErs?_QfRnL}%*dpn*i+d&IM#&+>z0y~t9))l~3p@aR3)#f= z*YfdyS%4+{-w~Pn*~T^a`CV_L9JR+#bboU#7r9Wx!0d?%0>UNd7lRd;U#X--yrd(R zyk-}PTova27;4SKkYn7UGKMjoRI)6Tr`_`wPJo#5pYinFNZO6}gMvI=Sg@`PII;LY zZD<-hmCp4M^2eAFcPG3qj43u~G;8!|8~Wr%uk%J+StwXXRHk!JyF1ZH z_0>fq;IC(x*i3CiWcAzn@qeBEf42-rKVm1Tw!l8zz*-Nk{;W9)AhQ_#9*m0l*j)o{ z7|U6pSKo3PqfsU1=Dd!DYkJ$%HW;Kvg*w((;`m_L{3AU&nG z;E%;B8%AH|n(yrC{qNY`e=l3Vf}ny+t`M;0V2YeIF=;cSayUzpMDvPzO=^-|Xy1K+)8f`1bfx?AxEla5$ zrY!^evq9O1_9=@6?iS0CD}nsdurWqI?++cz&)d%+qk`NgclFNkmGxf`c!C`J#c3?iFyeC(QlMnL+kvNE!x*CsE0 z%N7q7UWn{Wh1V$IFO&5Li9ts68?4M+9TOpm`otv-sy#jvr6zw%wExfSe|npJ5?XSh zNxU4!_X0jN&(C^5R~vkLd~R)g!TOE{la&Kwlm<$b{C5pON?8bQ{BkU}=hDcpn}*gU z@5t$QsacD>N-PC>PKW06_0-*#i(iHUNyS(O!)**n>m4(st`y^({?E$od-wLf^cYj| z*Qlfwh_Xn5-Uu2VCbcDVzSF`q8_pX0WSlK9Ke-Y>H-kjW=uC4T_3Gh<(n zO2v?Ic85#<=4p*c>x4lh$-gbcJam=u|+6WJ-` zw^dVmEfZ6?g^S9hL1*3Kqt3v+{?EYAsU>gE7S_54{Jfn;(&W;YDiZr8p>>&>fJ0qr zWtu#fX@gO90b_UsDq&)EnNFy>0#U5KB#hoexd>D^r$Ad>;SCpgE0R}jOXdDK4!s?A zbhek$)5svZqrJ&M#>yv+BY6;#&6v9ne1e3`9!*8OVZK37xq{1dEuMwvzHb ziQmhiA@i-eAdgfnoLTOL=Mh2xItn^&My|`b`8k|1B^E#??uB0g-Ja=96?qFQ=B-uV zJu-KOv6rx@IX}iQXSLghgruT<{slK@Nf&fbEb~h<;=5DeaQWwKJLk!o2x!1J9bRoT(7NjkA#+F5eo^F(h9ip z?sRPhFnSS5_U@!8|NHDvEjk!ycY?Z=y3GpJZAY}}e4h0|K|w9eI$BTPl{GXd3niix z{u{Zad0RvLzm&GrE2Mx@0S4s5<;bO8IFRocMxBs~@(w+Tc?J8nmTQ3^Dnr)U&3er= z+>xiW32aK_b+J-HdAv#eA&L0(Y{ye-1{HnaIJA4_{~madRpmjAv@mixRktHqX}b?+R=_VdlN5NpZ%~RzfAlHFH`>f zUdK!;`6ha{B~n1~keP&~K=xPAW~ZGU#%n?2cKNs~qFEzbctx48UUcK*InUb4F*c*1 zB|=R6-aj#lg>gV=kr(&Au{`q$WF{VDQ;iTGl9Ek^81R%qG1zTDS|lTqNlPJhL?M(x zp+O7AdYJ=kN3U_E6?A{98WW_ zsVFFFwssm$~5J;LAwECzDdY643n-9HIi+bZqI zo$c)?R&&Moi#?m!IXP$RP6vGnHZX}eOap*pjYZImJm=aUV@h_yfx=jUzw{ggBi<_N z>SjS4=;dVb+LYWPuSBo+p`Qr(mO5`Xp-e}f?>6lDps26m=8OXT@5`rJ*R0>Ksa>#u(@HvVRDDg7)fi$_=B5;pL;Q8$Hdx_=*azGBxtf&HB%#9M^?vwPl3 z$HQJ`z0R}Id3l@OeRjIiOiuTcyyt9#VRY3ky`7DxLW9}fXRS^X?29eU z_>Z;ZD@Z{zfZ(O26h}&Vj99%gp_T(L?#Q6ZK+%x}Ib@u5iY>?uFo!s+f+LEv2@^Cp zsi^6C%u6`wgz5HNtk$QXqdTr8U(Wr08ScpsMA4yNMye|+SWVAnWtoz9VtCJL$(9ii z5GXIV9VQWiL4_?sX)LU)Yq|{Y$npB_<0T0#G1z45{oZ*^0H(~0bQ+a$-shdoqUg|4 z^S^&U$rHro^0xM;P1xT$`yq2Vg|YFkUPp_*|AR-mGzqX$hC!v1dUA(fl|Mzj;raL=33Lpp)miEu^{+n~HSHA$P1D zYU=$gi*YYB1pk`*t)3uwyzfvO2RItcs)c?gvLnUjr)FSCX3%ZIKIe$1s_DGVAekRF znl0;kJfcen44Ux$o-V$v$TD?;j!?IXOIYD@>>?-(d>?ixZ;pU$h?9qTe^cv#ChN-K z2NG8E|MM2jTDX$p*)XEqJ4$buT|@pkpff78F~rCmfi`~;*JP?SeinRE`Zk$m`Qmdm z!HHK8jk>!PO2jeL2~==xT3Ud8>3_$-6kMED$Bf_ev7vkmr!uVH<6N%XK9b@v*9_|) zq>DdNhk$)@xLBSydN#7a{U#LmP86$L(O@`Ljzz@8_Dskxq8MPY{!i5XpG$%IW={@_ zS;XNxq3Rpn^A6L?3Vur(LM5TUSLeWCmgp>>flgC{PNl#$fXyHLt^0oa9P%lQ`c{{1Z>HgC3yo?Kr5b45 zYro6_)2>X2V#XkSs2o)P&n@+WK^q=B(IrMqFskp3H!yg0JG~m06%1TmTz-{tAbXaW zzYw^u8jB1)C;$xxPsK>_%c={jZoo}-5KkdqR0q*1%6XD|2;)zqPd&r`2`1jI$gde%bj0YO2~=r2fRl&QEZovz0kJH>-n*@=m!!(8MUwT^91AQ0&LMq%zwd@d5n 
zz6XY+tc(oHx}#UY(BL5Mwol9N1e^bfmi~K*bPJdm(LlNNrx^T;M^BG$R94hX;s^YC z7I&R-A1p5wJ^iW?EX z)?YA?nEl^TV7dkDA#5DLDjuSsruQPFbA!|IXjrKkB+2m*P*%b36yHbs>F3 zFU0Qao~1N+uYjact6<|2-X9J&X*1ivKb#!W((0Yym2vay(So`N*`<_zh-_wS9|k9t|EV>YK? zq4E%rRaWKTc0_DpW0mqDIo)%Y-D^T#u&PE!>jVy8gQ4Rmnu1^xaNC+OT-)#sr#)~) zN!-qR!+?X7{mUBe&lu1D1vHWOce=j5qsUk`sOoR5El~J!@GTq}DG7X#JvA1)?X;c| zJW>1#4={)^?4RLsFPd6g#h;$8H*&Jq{IMa2gL(ehr_|B%46{qCbr`ByY##DV|SW zIh)6cO^1d-$&g<+yEIMg>sq{BvuHKkmb)&g)d3F?#XX|S4#g5?%()+c@?AcgVJ{4t6Dxu%P z+1j5dhBqi)qUU)^`UaTzgr|0>7?9Xi);FnKZZYLrMRnF$3*0boRLRI!ZkV-CA&dS86=hWhy4JiHG`s~RYk-a(FGCu z2u32D&bw>)c3>8NMZE~Zygjr!%e|yh2C$;FZ+Z8qhoc@uz~(@hs0Fnm=ylQWIkS&8Xid z<>uBD6dKz8pPWLNNbeDhiG`IZ&>SkL)veXeXPt#-@MHbj~5jXd3MK)0#mt9Rl|ogz*W>7v2`CBWZ0DT32FA(+ngnAv|U zt)+szU>x*KI3gEs{HrBhX5%8_uu1z49AdN0_J$3oZykC~Lm%~T<7vh(NJ=RknU!;0 z$1{>dYe03_M87D}cce+>>L?MP2BBO`bDlk8N+g+>JP0nLyE``l#&3kCiv6SZ&o0Fn zL=Fuu%(8dS9lzQPzWftXzZLIVQ6tTaCRyL6>O*BK#M$>_;3Y;&2}q z_AILEV5V?2y7V*e45{)JHgT zgVGRX!L zYY__7pW2~1jygVo-RpCvSi&RkA|W;{8cGqQpL+;&z^aYv@wDs33{4#$=Wnz&`AdkAnl-U^r;=IT(n67)e`l5vi)~V3ag<|SLf;}$g z&Tmd~6_6=0gGgIh6kdO3O8wetm^w+48Z(9|*!h)WuwV#Py`P+ZCf(4hs(Mq3RncWS z!Um#&#gi@TC{vr>&2y`N)7Cx7%nDb?dkoo)siXC2k&+AXyu}_&!yEMXr^@1nC#g{- zt@ewyFeOso&`D#uX=D78bdaThsu@L?*lN>NnrqA!Dd)6@wvraBd(-BzVOH77xW!B0 z5~Hl0%l)GVf%rc;mTWzPZ`1Gio-E=opOq|MmrP^XU(dA2bAb(ON}<(hdAu_B)1S!8|l2hL;MLCN(UF6t@Qg~p$3&T87P$%RucIAqnf zS7IC!E(_~_NZR%$k}}UvfR!sWchaFb9Q53U#en02lJ>`E~}Saf)^<|$*yU2T{!<9!Ex(DNoDY@kfggNKd$;yNiL9ihQL zz3u(=CEjt|*h&Wm8d?gl66+rrXq1W4h5{mNGa7pOq>bk@cQl+buOYjCfFAp(d9Dv- zO~++~_t^S--$elJu){|1)yUu5dT7X4yuPmF^54i9A)iO>^6ej0#StJRGKc>K2joRx zjdy>YeL(pHJNZJoY{q_b`EYp(?3?%$eI!mV!;JOcyo2j-w`a~0F=)2RH%)~A%)T%#6%nrWxN99ijr?U&74#=xTN8&l9Fb{6Y38Mc6LSJhi5qn2HN)#kd@bN zv)6wh#{&wmGR$B$_c~Ky+i4ijCn8r}e{K5c;3kVs0VVY_l3y2{VjNQ_>5!sAKqR|e zfz5zj815 zgpBhO?d82PIh#!2N`6(Pci0(>hRXu&EN8cuX}#R7ap0^mC;(oO0G6SQnR2TUNoji= zThc;NTc7xxX)+#w$N=XH$IgpdHlA#AvW%Wq9P0WxF$Z~;o#4k)$wRF z@HrEIBI)=A@j+snWW=o5AyB#$(o94tjvH#AQQh1+c%NM4?CFagGiGNRH1U57()Lo@mWgpL(iDM4C zQ7ktXbo}=>Utb{eKYadIkNpm@;#y>f?2}v$k^O4g9O_^_Rv~Qyr|@Cr=EJ+2Mc%0cSZ{1*}RK@A#*OnbX^+yhHu4j`iH4mz^>0S z=^9GDKKiKqIN)O-;W3;wv_wyvlO?l+yS^rIA57b~OYI^w^99p|7#95s?RJ-FmwZUA zU42NkizMH+kkMKIU*T)gKG%kL>h1Z5`XZtMLdT=)_lM4gv4=;Vmjl%;)GcVs4pFg?cXs$?pS0eicozt^B z9wP#m@1-I4A{TT{;1;-v$LwZ^AVafn{Qi1@teeiSC}kX7C1rCEIF-)temt-{dwt^| zwSg03EQ2rE&(7!|$H^s%gl)O76zsu#H#vZzZ_UMoG^vGaiJN?e*a^UXAt`S0eA&Bw&>gU`B zj5!W6RKfo3pUr=&MlaPmjWPy)&#u}4T!f8{?We_EYElwL>(k|!JR8~TEo>2fSN~HW z({qw0&i16^ovRPf7=ZGBFFGYr9sziyE!Y8W5;ROqoP@TYvB|0Ba&!%W65fAAZ=*r> zG}Q@Dm-n2<6wPYMdYstZ=19_s(w*pL zkB_7cMpY9vUyC{`ON6U&H`HHPts72$y3uD4W(n3e*r?8S@s-Lk5@nJAzs7qkdLX#|CHyfL9V_Bxl7-+fx-mZ3MChFTCZBi)JzSse+zeNtx(W&Jn2|dB@ zsg{F&Osy(Wo6Sc98hSE0Sxv$vHFSTDodkxYt`}7y>D*2m<`u;n(yeeWJlbzm+wW~Q zyxlobXSn84{~t$Z!4_4!hEeGugrPxdhA!zY>F$t_ZUyNWy1Rz%ZUv;JOS(Zq1j(TU zgrS==-}wvs+SlIi`#kr(7GG+WpJf}q?yB-9Rlc#{1^eudR>`G9W07j_v^9|`dF>;C z*sqeroO-6lz@G@v>?}y;3xb`dDWgo<`vrBi7P={J+Z$*nQpmk@W+_>sH4d5#pWhma zuqNa$E;E)tx}7pSAWPBQJ4*wOj`(Q364xRFx=4Q~KLt8IPYM4@+Hnb(oU_Ojb$>;R zBZV<%LaE)UIU?*%X{G&}YlJzyw(Prs5}$kxzW!n0y-)J=(7%zGN&i{lObqCOYcFZ` zHwXI;*3ue0=G5bz>oTAZ?T@QBZKOD+6g^(iqRV3){gfux85hQ4=5;gynO0)V^A7!h zVgI4F{nzp6A9SlnM&LP?5;)Pil%= z=-TF8YHI&nKg%bS6)T1!UVBPlidsk9M7Wi{5;p3as8{&BWcw7C;*q_dH)7&ifB~VN zfImnfv(-s4UA*$XOR|^POV)iNiOEg&`7+`7LN#_kbJ~>I&$s^`?XJY$9oV5ic?8G^ zDlS|(oQwZUFg+;o1;B38dDkeqf~h^mFcK*Z&S;WrSpMM=9dQ3)WE~6qjW{~dE&s@Q zncB$zysk-AdzW*#ebZd6w_` z8`Povx6Asov@FzL!Xl17&^^^}0uOW?Fz#A7fpCzkSAwuk;Mh@bzBN1l5=ze2(3nWE zvvYbh9rly7UddffK=eev-k=`oRnXjw;w67whdlkAV4U6v9YqycV0b>bh7H&xf 
z;EvpA2;kS9py7=DH`WIk6O@@kX0lOxg~_a zJ?QZkPC!ZN*oxp`FWbJ0<}!5x_#zbXHe)Fadj|i7I%7ib{&k+x^YZGDyfv~;RJw#8 z%#|i!P|=DboUHC9Qc_Zps_vB2-c>2b*Zgsm*5fsBQahA%q z8y86c>N`+a80qN^6V~`qy#5_R5v@%9Mz@`FEyg@-ado1+vCfq#^tAzm(I}PIM)!9W@gDfmHPg*grRv({&7f8gEX#U98d8MQ^k9g$;nQ7-yJ4`JW&l} z=5sBU#PSxw86{FFF>}L>1J=q_jDmeipk-`Ji+|^+8^i03>iX=9Hdfe6rScUM?n;EJ zZbSWFIV<>3-20Ld1^D%_Edw}PL8PFs{KXS>;<`MMO9oVuWi=O3v~Va4=kh~QIrSwj z0LV+*!l$JuLGwUlv2fh02qqOO#@fHD=`Ypq#ubo zr+-L{&d7~=P%)Ykf#=?&wPkd(aFp=fzP(dxryUkhCb3H9wrFMJ8ZNrrtnF(H4~=B9 zUU~7D@2R=ic@e&d21;tgqEZvnvY#1otvjT7)Xm*BdA2qM{OuNbe%>W(Q#2v-sxB?t z%C>lvu18vyT)=c+C{wB`!K`L#hTb8EM80y*D`}bb|DoA>?XI9zqe)xvYZ+WUbVTKh zXX-WFE7*^oF46;@4)5m86w?2z$*`~gCA2!04mHOv%;HrN|9-eRN}c=N>hwOQ$VOL4 zRH2`Ad)_@>GN%*aosp8@%peL5&V||RNzh!&9)7Z6$NE`dgv9=n*hUs1(X`kv#Bj5u z)}XOgmAbIe)TT$qZ8 zP|jJC6uo`(Cx_Rz;(vyCEdyj2ixhxwVXtWq<0W>{VbpYQ&nx)N5BQeCP5Sn|L)-XS zvM1PTlNYj<@-YzhJZ%F*hk@_iA*P0eDKO(C&Js}-^+mk>ZOR_gPk6uLD36zBSy+F` zy8mOL)tg~xoFXlYABsFNGwBr%24lc|SXrG`Btl6M&_I$by# zQ6OBll-KpcvT|}`p35e7?0mrE(nOK*r^678J8&V#hLhO1>T7LE0b(}p%LrW79Zv_e z4tb@b-a?O9oa9<%o(=}tQBOpAbB3~*zBVL=42Js_Oc~9UTIwe|aTe-5h==z^8dXE{ zrj%;Njfo)(YGMj}HHri-XTKCrB+jnF;vz8@n>Nw2P^gj&ZD4fH{uwMtwPv>0^O`JF~a`r-0M@2>cz|StJTccDB<}8fYQ_5ve`98AI z!gF*62m1&mh|PysSl6f?kJFqQkN@J6+xgzZ?)gbj>aAC;EU={pw)cW`0ZA;ee_tOLkwp6c)Ee6Y>IH( z)T9xAx*TLYVXH)c2kGzGaf>1aS%r@KS?hgg5siwM=5_mVc9zTP(ceXrP8C4oVJ;p8{oj*B_kR~Hk1!-WC0tNyl86@{uLql7 z;2?XwU+8%eya3QiKV#WoeiMytW+Eb>+r*+py37%lm`M=96JvY!Wg90UdFO8KF|zY3 zNBLCgk_nHU4(pu8N$smx1l03+-Zx}^6aE;?&hb8f0$Wz0gf_6*qZ2WJIPq&g zM1p;HcdVK;Hlt6y_u$zYr{)^o(`QnV3HnYrjFOIrX7xh*@&O)(17go_9$vzVNMRJ_ z;eD?ia9##)EEE+p(Y~e)%JEh3E?nAxe!^?JHd8xx`h@T4u{kuH%=P(m2#f0R|C9NK znp&%|5?!PqTAQC0YK)Z~4$F0{13|X|%(jZj8j76jwm1&GkGq_&zr7KCe`Uq#e>tZR zNw$I5LNF>k-0mc0&d!>=I4!vLkz*ylOgRTyT@R6C-QS#YE*rZiRAb0SF?a3BlF6*& z=`}Z6QvNotH)pxK{Hd|Klk_el*Qo=Mim+TOq=rCA8$bEAgS!nbV$;uT2cLu<1(^Fb zh%%$kjmTd8D5&Qo!6Avg1u)TEAqvR57sPidHE}@eRZ=3Eovz^e4{m)A!w*Vl?-}r3 zNzFeo+qp7bVsFW4(I=TBIfmIM^-3WqVYE#&#M#AzC~^g2n$g1MZUkO`STA|a`uVmq zaSat3uF+(XIeCNoWC%b=J$={-vr^8qXgJqd)Z21G$Et|&$= z;5H36-o6X72S%osgh$mw!aZ=d&Xi*{FZD(zFnURRc0Cxl?%6@NCh{>jos;mtu7z=F zeKOAQ78ll2t6y$=BZhx+=zW$> zw$0kKAKBa)iu}LE?49MF3z`UdaBL!7oo`h8}qbcmp3pRfpq2c@*f9S7xKMzNWwkRzr*f>;nKx7$se`ppZgRY5vg`@6spd`UHD=?a%St z^A4|NnS^&Ox&uw(rn)`6mo)LX>a{KMxIBZKEVfi^ryPV;k`(yx_eZ$6&fioY_*57_ zNF~COIu(qf_uNrJP5id$ctk5#j@1(itR6g592h-PQ!&cNBT73)W7l}vPX;CC#kmI# zvctVL0*v1IIF6$)(y^(Io+pf4@vs;mAUTr{e>WjM;*Z)a!h7J4|2!L;5BL{MZtK<> zc&;9h9rCSyz|QG-%KexaR%#z9g@Qmm8m6%DRQJzg%Kw~&8@~UShW{gHH51(Qx9xe1 zk?+yj?lN;#{VyG-iLf>YMXp7Ol&DBnZ}^;F)ILh2Pd0($>9+d=a@P>ho;DbB@fv-u z2k4;j2>||+FX}B|vD%Z|8y+Cj_4DUXwQ+!Kc7Qh;K2`!Ul*uBBa zNGFbm0PoW$l}r8tU7+;>ILk60XV zORYFuY`*lt8HzbW%Pa9!Lm)GWYcfY@c&g4I=GV>1tB7H36Rl;?L~Huk-943&3!I8Q zEKF2k;+GSg$J;9AqciH3shK9rDyvZ$KIa>HusZ3buyO|6;3qnE=oI@k8usJ z@GmLLIKq+EiMowAAQa{Y0H?D9LulBSApnNNNUrzx)0aoKIR#dy@B` zzU0zLGGAImxBiQRZa2j@D1$N%T;E)ETp7UZ+^Ha0-TnMBTvcwW5Eg1DF+a$5JFc^e z;7t-~vIAYb|E-=c9+>u`z-ttXK4J%dAYyjV{oMn1auR~TR*W^PM{~rhc|Nk=L*fx9 z0A@EBsZaDP5l^9|Qt23p9(=@Hp81wPYf>VEyqm&JUg4Q(x2D4!;WKQoPXRdn>&tMm z_}Z4k(@6_$itz92Gc4#2Raddm0#+>Bn%6y&uy`if>BsM>N0YyPm=l&L;}@@@OnVK3 zu!xkY26KNSK}e0T3$3VuuaV(4d6W9(_*T<=`PffsZ8SbV)qmPD6$wcfu^fzC`G)Pd z)gIDO4LFW|S*En5w3@VZY8Lpu5{Zn+_7&{;Q|%}{U6LQXv6wC=-K`wRbn(EklJ(?z z>F!@MEr%84!6)I<@T&dI*9F`&>GeK)816a#ZmjiIWM=}$K4C2(RxP&HKMwky9_nZi z|5hvWvOz*bv`MtA$gQc|Be(5+7hT<{N4|BSOkF^!6|{RaFbSUAQd6hB%x3Z(G+r~s zXgRGiv}#4U{)+O9Ov|DAknfq=;y-_aTdj~`1!QpBUr@-aqMM(ElJ7XV zJePdf3>Mn^HZ5=s=J{J=r#F+ePv#mE*|FKH+#1C8cH~(t=iBXCv%Vu%jGn34+t=Bt 
zW$^eQ6_pL%GC3RN9d2(Gy+U`Jk=xF)`-RGKi&0{2(SI7+lzALS$iB)y+c}kgEdDHe z48^0j>-z@#nj(95!rfiQ>{R6_e#c*Zzsp|T>6NIPchgEFca_?kC!txf?w{r#up{ep z+XY6ThvVTOLgKsq)cYLT&X;c6=E*jj6fv)UAbEB8eWaDI@@W|$H6@FjrotMM)rDXK zFbw9?tHgL1nlrH+dWP^7Je9Pm`$~p(P=jBD?6SH%pA391esiDq9u1EoQq8&}fB4(W ziw>b24d*jW=;LV=N!s|r>UEUMdIWURUMwU*SOE%@?6#MK1-ER5WZOPLrxEF{ssmgp*Kfr7>4y(}i}5|8n(H8Xw_r zBRlpW(suQ(r4npHDcb{S)AcSd#sj?m{RbjnuqBxv2KMfflA_ zQMFF+I}@CK|n)Ik0>>!797SjRxk6<%W~L4u_{mEC_{rsu#w}XPq5FKF()q3 zEasSdyV}SA0&iL7BSpZP;e1!RmQwb@>zj1W9;}#zO9_22v6%@BAkCOmm@(KXfOgQ!{Rib2kTs(rGjRCNSt~7aYa7 z3EMAl;fn2YmFQXoxa;^UeWQ}f>zu`091;r7%=NMcIK~OkNh@89q3EdL{t7dzP4`nj zb!IKn#?0$|>};&)*Ck?FJ3;uTb-=%;biw(vyjrGtO=ID4#bkK`no8gKa)HV+3*uI< zBrnmQrN{WR$d#dEDv{W5-m|XTUA_oddLXdP5hwo_YEhDaK2ay#Wc=#I0@2tmA zPJtqsWR=Htx3t8Lh{&^u6&%Z?ShIa}vUvMjWaQ(RQnI`vuYt@Lo&X{d`9|sDD*k z;`Y;>2&eJCEYr8&^b#`U!k%dzQZ*LKTbnMY6;t;Pa18&Xuq3hLCFl0Ar?ux!#KV1l zcM$kk>D4;IPiZ+!yh{nS%T@ft5!yVmC_K*JX1~K~AXx-Q)=JSh0*gHl6|xh55KYK;l&dNTkB2f4EliKvP4#zZ!f3RP50$f9$LjB+ z94l`vT8)=~-6u%_La$6B>_fCntP+0qp~Oz*i-GPG=IZ>gqn!}hzPO2}|!8R`fj8c!Jzx#uh7#K-6Lq6D(i(Yh#vn^>Yxy z4u37r$BSK`6f-TmM44|#2(Y)iqg&%tj}NgMg1=4piI8B^nU&cRPHUa9!z3S>MPjx~ zz@e1=L|>wsZhJ1Q^$Xw9AftYo;L|4}uO~s-R}8t9>a{+$EWY8}2~b+=^Z}}D5f-g_ z{7?S-tCdtC3b@3@D?sBUv^PVmgjm?^{S-p?Mp0sf%8M6}Zi#O6nlXd8hoctB<|al_ zdJ;SVYgC4US)EZ)YCC71z6Py%Fdd}2{k^n%gEb++@yQBmUev?3;buEsS>90;`)eWn zneydubEYyJx>sdbZn75q=cf4M5cVtBj=45)gBR}~~dse%CT>hljsL2v8k`{lzPZ}|8pRRB}_-j(J-AyuB6=1}q zl+aEL;jW51NmMnAOkJ7-H;J&tnBYLKqq%Nu0IBq5~6-apK0Gv za6$(M5%ERcROlAaib?BJjaj(0RkZb=0fVH5M9;_;R66DsJM@Cn8k&XjqO6=cXSHsP z%wcLKrGRFFPu4-1-18|CM#ezbMNwm-dRa4&iR0l*Uj5Z;GZmeCvT9}Ww^z>{8AnFD z^Zb7_X8xqisx*G0gXI(3oDXkH``B0%clCBi_e_4txGYn@kIg(g1DLcT51H9eeGE-} zCf)2xTSG^WQj~Pb3P(lDd7l=eEHT%EF!Gjtunb?plN`Qv)Lj-5wvbGr{L`5Vu11+J zNqEjIL0u-xyg~A(5GT;^@blq$>xEaqi;gROGM1`PsemV9rTE2|Ph{s|qgPz=iM1pU zyhtQjYCw)w>SN+DC+GIJx-w@51WRR9<9P(k@C;3Y78W`~t8_pKpJ5O}i5^5;T=?no zr&`N-1U6|-&`*N+85XG5b_^3D61tT1@n=PL%TC))B5zral~%rge<)f7kMP_mQ=Us` zdn8in%7#U;Blx?&5vdxTH_^(@cf+^nU5nB=>3d#6sK>? 
zQbVl(yr++ol6raIdLaHf;cqp5*8vMKQXT~-S9&+Njj>m6Od^)=Zp9b{6@7xQJ6dZT zK8sI)Z4uc9qJ+PfDFY5dxhOPGE$dJpSP?5Ceek)3$~(NP#UZA1Bhyh=;$wK}CiUXN`jDYiv+- z_n}JG7a@^|nF~sNK7-F2vsBj!&QT)d0g=$3Hz}>gh2MU0fRQ-b+4&XjogwliOI>dd z8QiT^WeafrzLOlVBv51Spg|?$e5?15`6^ zpd^zY`HI6Hv$rieIEnkX*EAW{WK|Ey7K~^%|%3Lh_LcJysl}J z{=k^YGV;QmvH%~8i#{7!u3EOKZ8=x!3u=?N?tVSSWI&qP9`GhAIc`?1pws#GF);GK z-5u3NRaYg2(taD4a6t6K#~;#uprD0C*tGC`)@hks z(`NW7_sOReHEa&1-ppMR9 zjLY;R%+~Klr&XxlJRXQ9$=9dVEt;(^jlc*^ovsytXmUl~BhHZYxh2-n4fh#weRYvT zdfv({S>VUtPCf%&E7QH5o=3Ll^&|1*R!hhpa&Apvv+&^!*jDgvhU|JcFN2~PryS?y=Oc!WEs!;!%9HJS2V)y8A?QLJN0W_$EV(_ zvFqWEF0A&GZkvkoOu^BXK5e-zOcFJ~jVN~0E^UK~5ifSWOt(yMOstpub*$JO`Z8XBKRnZv0{xUav{nmwo}4- z4Ml%Md|4ykbG%Wgdky)eP!9PRN#6 zX`LSJx3V|qVW3J3rtsLKdj`FHQqdV;DH2)}bB1Njt}+xb7eVGD?I>>A6$ZGs{yD!T zc{7F{zkEY#GqI+|P_e-lSy9WdILwu{jpAKRx|HgTJy$j83$17P0ul>_HY& z#i)_UeG$=ciUdR4l;?si(X$!i;a^4bx#cg>F5)h%9LYTsvP@S`56O5;O*pf-hT`&; z-iN=lB$6DnrjT3tMZ?LB#Z2v0A|NDzN`*!wm+m*%soIPIzF=&W9TQ%*@m-1%CHCVc zy4^nL6;l3(0P9~^tZn;20|vUuoBHnqx;80)QcU)jdb~BKh8uHxY+b!OXH2FG+@Jg$ zY?p84>9RvOq+iwc=>gHjmj&INKHHaf33$iFAK`O;GwY6{jXVLQS){p95($v!P=oyXqf@SrE%hdU*AuMgi>M8mkEQhmtdj~OT#)O` zd0n4~o@?E8L%%=`i&06mmHcXUUcheK@2E7$^pfDmv^ZKlU(fSh^smde&oDbU zVJJmng!ixeE4=HO7AQhr2@iGRG|-=>%<2Geg3Ab|KN}?;KIg>+z(Ij-VJZJsDZ#4K z?1gjB81r~i(yw-E>GLKp7ZTk@Z{vf6r3uTak>g8FNnwx$ZXVz~ZpayfD76c9bZ}=& zG~A8wTv>{rHJ;EbZg-VXd*T_^Bwhdh2#%>y0bP515n3Pk&Ov&2@V!I%$q?4NFItv= z`WP>j5TdWA_4ps===slkLiMPVD1R9|1j%0JJuGdEl0)0n>@!aY%Z zXbLJnHsgl);UsVhm)6_LOVZwG^=(oTHY6!90TEf1ifS}RLWuV2FLvzjY5!#Fk@%fl zwEZ`tvJR*QrrRdvAIsx0eoO1I-I&Fpyvi^bXdAv+7+S@!2tk_f)G3=>)n&KCQBU7c{cCaV>B}TH zzM(+_(r2y)S$qA?L2f_#k?yvvz_wfZf4})WK(EV(T6!u}of;!uKA@l45f2MD9hyZC zS8%PgkH+F4Em+zFd<+SO&d8D-9+(MRXY}+l4@^AoLk)>+)9i!2JEhEEhAEuN62!5$ zKAbVecjcqR${g^>t}>%gn!CWQX76Vjefm*1tz+Yj>2W-K zy1E<5XX{z06;p39Ks*HWIP@XfZhe(5+VYj~1#IA)_A)-tG@7)%C%pPX*kXu=?c+D!3sboaX} zJZv@5GWdu8EbhBQ=0uAmlrE9djV-m;@{ivjvw?E+Jtxsp^v-c#2J>=g1dhU8l6r>8 z^^avlPDlEts zr%O;tB=F3LFhr_Tc=%;4D9*pQ>(`o;kPmxJh6*V;qZ{?qcetfn0e-3 zgUCljH(h2jn@Nl!Q3WzedyJRZ!;V!6(1;X9H+5r;HT&i}=B&$EBqC(- zSGEO#fgqcu%iXqvnf=#$V~qM;p(|WZ|L`j8(+;5@!#H%Bn-M3YgrLDfs=|$@8=I4C zaR~B{`5xxhqL`f$z4784(C0?NU|eP6TL3DGDXQ3&Q`?a zACvKhcyqTzfpa;8n7r{A+=_lrtsw&LXf{5Fd7YC=0op?}U4qv8tT4$Vvw-6&Av~j) zAVGfGIbqJ5NYZ2=zZg7xHg)Iq9e9uu6kn~I;GO7x&JLj1e{+kwdPcn+aH-25HR`Hi zw*C(&DwYhTRHT%TE#!{g6GCVldB?}UP~eO)PHjd1gI8SlA>{9dZ8lL&O?VlZQOelh zn;sCx%8q!%EkTTAx#6&IFLtl^gC_9x4}!N{>OpHh8eiYy&^#tWyvR}FO>h)+ONPD@ z6rtvD*~j{Cm0zRnBy9T)W0&r%rZUc5kSTBvvL)PPH|&5rzI=QvN zD|pqjIKyLg+7d#;h;Bm;ngSLdrhnN?Yol#YT$j#{QRPp zfDTI*ze3`eKs>{gD?A$de#2kul~3g0j$W9HWH3vt+yQ4HBAF&I2L&^j0o`!$VfF4W z;S)PH;qe#8ZFc9jU{#SnKqf(L|DTRf7yDt>5W<;`fxob{C89;Z^;A+ z9u!Z)Md@7<58ji9L(qXN1|yV|_?yg) zwBN{NK2_%lnPfXy==9NVt(Q^G&%llGhuI>9qp8DeI9U`Fp8l51w3PEuA938pZg6#( z;A|h1o9TK)$QI}0 z7kdJyM0{KnLbM4X4t(l(huCl23S7GQE6BwoM>4vA3%rQSG7L{~1Lnp92D@kcfPoyz zT*|J^M}a7y;}&mRBF?;HMGXrVy06&E<(u`5UpE7Gy2LZYq$aNothz!T)tC55cf=zl z)2F5gozsVJO$mVb-%9*WJ@^5yM9AdI5dML>OT~C7Vi#i@-$?{)LKL*^@eERGg`o5l zz(gVnYWip8FZ<3b7QdZlL+vR80_~hLUPtILT9ZhPc`2gFigq^3BYeVaEJ^%Je}N1W zhpg=OA7s^UD3m7!+3;_QY7pxUGMZ$32VbBq>)75JCEG%;T*`QcvVFg{0ylL){^-I3 z<-k7D+t{2pWf7s)+}Yc-8oApRkQVSa=3Q6uyn3Ah1s2o@>C-XK2=-LcsTbIOC?z;U8*nEVMoE%z=6b@%pC}d3a>2bH|o#t z+(=js^%!W5goYMDn~+`aai7Jg8DUU2@MN(=}a&FZmu^#SDd7bxn{3k0F(P zk|+_lY&Z*wkyi2ea zNe`V+K;o8^)ztH8MFASCz`6Y&-hU1uz5W=cW9_m}R;u$K=&70Xntgh>5hNze+oYC$eR<8R%zch@?unh!%%8KBQS;mCp$ZeLEgO z+{-=W!FXS|f;z-BdW^H3zt}HzhL+KKcJWE2fJ2yniTaGLjXtaA_!Sh1bg+&?jt*`d z+!Xa#D(%UY#fS1iq|nU#BGROiM8P+k?Tq_x8iONpsJoyP&XT@hUdZYDfMY1|yak|E 
z{tBy%Wa|hCOB@y5?G!u*u#U!D#vx}kBf_JnR>4!Psw2QTmt$!R#SgA;wq$sMtYWIn zm~9y>g9nD)Ypo+@e_1lv5ETtv0KgL_+mvy@5C6z_kf=Yq`S?f};}QP?FE=x77r>pO zSinr7ubhRbJ-coYZIrqjJKWzzns&kMQm|n}1y?8?~%w;0N(C| zBHm1)Ih=UI)5m~ekQHv|B;^m6A7VSA%^d@5Q$puxrXK+U>DTS*i8IERm$kNeId71i zHm~-2K(qC}@v_G6h~e-$UJXFRNLvJHp%wnee&fDhg8`b={=bN)0|-de%P`O}lepvT zet5!H=ouE7#6fQ7y?NU)WV_c~gFg{2`kNv~Y)-M}bvas5KIk0O!5TDYU7$<8qyVdC zQdVE?V_Xz8CKFr5gw((-b&g6~uK%+Qkvw1>jlHL|*Juq%`TKZoB$J4_8!3i&Na-Nv zMLnBy)Yw4Iqx+pnE-GtVBYdg{0lHrzeV@~H6j?#?a~c3n{_x)F-pP-SLrl@Ezv4>2 z0xJ~}wc{{jo113&nhIOs`105aTD?}|a`YM1P zGfYnNd_W~36GvKkj*oqY`39gVW&%}HV;1OcyS&;}Al2xhIX$%`gS8aI`Ns*xaKZ6}iB>)gpq? z_XfH8EQksdf-YZ6KN3C~TRX&4=hWe0bl-gITpo}{U`jaHGEtWWx~xYMdv= zWCfdVkC{H#J#&amN0$WyWoX6CDS<|b#lsW@TxVcX5qZXV7O10GA2l9_)o#T-szMvc#4>51W z7;Fji;A!S4^=}Gud5a-JK~e@9t#S(T2n>XOQ-x&c)053=;wQSJ8*e9=gkPiK=~4hk zT}z=T4;dA57_CDZ(_h8q>EW!pZPHW@4ri-k1k*$gBZ~hI6+P(Qer{1%V4*9DU7u}* z5k@?Fzvag83sPNJP{8oLt4Y-~yK_j_xX#J`cl9;ty7PI>&oqBX*bc68Trxs6nDHMI zuJ?U01?)7%g1mu|sY{M{AMIHN7SR`1R-a0MnFcXq)^d64uVoC=Ex9ZelTikZLNx)1 z8I*3w(&_e9=xxFEi)uqN$tXiO8G+CAg?fM4&Z$oMlRVS+1(>GJ3Zx|PI*ZU&?VYJ2 z*2WcjM4B%pUE^g?qB-KBqBsbi3Zn~8Qx_S4%2-;_?}8__0iQAo3b~BCZh$ylf5Z2r z%|R2FEQ}r$ZowGSq>v4WJhrJvj|RWXC~ko>Kp;FKNX8NHQvQYaseWw6qwiDSafkS@jc-!Z~6b|voi2mSa`@2wHC>GEP4KcqYi6#d(!jWyNW_Bj?2_U3_V=@J6&WY-UMem0W}P28(tfpK46~Om zBM@o0*!d~Bg6__GQ7_>zzStJEBqSg%)<<&nE3G)^I{e;^DoP}1{i%F(H8Vyvs4tuC zUF@UgX8Z7mFpMaNvfZ^2!2{3dF-*Hk^;bh9quv#NPf)3JzP|GS2*_qWlwDD~Th*r^ z82++;z|Ybi0O7q!>;lXkVif#gj1u94{)9O;ha5tZB+}=yhkQ7$u*99z$A5wG{@OsM z%s?YLmD~=_IzWLOUOWPpY7k5RAal#d9=GyZUD?U9*gFz>1Vs<)S-dH9c8dHOX= z?}azSgG%Cb1I+aAbMzROI&?K?Q$`RV6id!}%OR|GSc;1E7{M;eLo_x=o203~)>Vl> zU&?V-tk=W zyI%sgA16tUu;Eo_m(p7|rLaen(vK7N42w(00%9V9HP;db3<=X7HMMfw3C~{(F|Laj zkXuIuz_WIwb|a#ov5T;T9sUtWe}g@hG>xS*_V^6h0v=iU&P^j759~47gq}fU@G`qF zRgYc6ze$SLv*4@@v^a^hPI&G;-bbIEMb5E|gOyUB@rCA{#!MEdEPErG){cgi+(z~M z0ay-SnM#B+eXdGzGf5-Yc+7|w#+zeg2(b(-;64|>^rp9@^$xL0`>2iF0pqt5(@v&t zbR<#~53>HUO!jjp1+hP1lV0U^Yl*J6&2-6=f0-MQxy;MRa(jT}6l z6X}-{Psc~CU0CE!Bk9=+zbnff#r#AKw4H`TxoEsp?#xL%_~g6ai0mY5o@Xon^-S!* zdOwzo?_>K%q(r~*)^*Ask3ikiP3)@q@xjCj5wWV@K3}+wTMoT0W)Yka0ro!BHo~ytVBFBv7$AL!c4a6|U6N{?nQk$}|GOO|BV|k7c zeQN!w7y}5ZjpZkWdR77_Z4AEAP*QMTE`^nD>yjV!zDE`a*nz(4ve9)I4wD}hT(|7< zgruAIK>a~v7i*wIZ7 zx>%2_53on=GK^)=E1Wk9pcF;PE8it`#68f(c3WpESOD28-!~%rdAVLtc|k}!bw!vR zGH1?o;0b)5YA;DA+L;?Zkd@-8%YE2-&fYod^}x~1`}$(I{ky?X`UfpN4_NP!?9_PY zg~L^Bt1K7D(AQ+ylbhi!Qh-PJ%f0Fz4>7`)+n{QuZw`BEWIC!I!xhbvlHm8xd*$o1UqQT)enENg7kmX?v3^2Wx(qGw~9 z@c-7@y5tyTB{d9ZZ^A2dmr!IIi8ydxYnuaFCeC<0=VxZxEs-T6{c^t8n`%UKuUJ@F zc{(g1ZGZW(5=VrEXENS?nDqYO;kx-IN#rlHl8OrNz7e7?E-NdWh|fA)$h4My)VxX^OYJj+E3sipjY#W)qh8@X(qYK-$`P=f zHMi`1J!?ho@b;2At7Do>br1kc~60iBdJ66cgGmL#%zV(MtR%+5WSZHL`0#E zN;0ei@s0Fnd5-512X)X}dE`q`!1h7Cq*4rK>g*W+jvYtbJ8LhqnJ|KrM0A%?F`G)D zqKHknE*U{yU?FA@XO^CkCpSNPM07~F$P8Y!6;T!#7umot_yX=5UKRWePb01ZF^Ku! 
z_vY<@tPe48R*Iwrhcc=TaKH@CIX6&R>E%0IRAOyuTvAh40A3m8&;}`y4!7Q1CyOgc z_Jse>fm|StM+lupdM}g&Z_em4R>nhY^Byy`atJ_=6&m3tsb}9+Kt^4qtj6KsP^7_& zK+*e(-HKs+Y1ZLW3knO1NtA9BH_y{f?$uCEyL>;OHr@;#vRii zxI0Lkm}~g1jAxmynGkb?CVA)l|KLQz4!kFvb4&-#!NpLH*CEB*mYw<%0;qmI>eXv6 zcSb(0*NmkU0WGX%jh8)sjB+`lwi)&xC{y~hW%)4hp&M&#joVbo>8D4i`c#D7#D;6Q zvJ}XeArX-4(+x8OQZiv$(BnX}8Ufe*_oq|E*SF(8ie_ZjHs;>bLbcB8>PW{Mgwg=g zf7*4SE)#xnaWQ4q^&5JEphzl8JRI|8b6Z9FBXmrWlU^sk7a=1f2^4mUmrRBJw20F_ zkM}?0|0>b1v!Ax(IB}9A&ZNsWeDtr5<^vlbNC|+OH#6!ZnF_UW$IILT6R34}WWZGW;%0XHCb$Ut9 z?n(5oQ_GT`!XFA^*WlPTNoe;C4n4HnUFrGs;`!geE&vDud!P9qf7=TD9_W2jxBpZp z6^6bT?TEFD0Lv`({6kQ8p@oIc&N>*+h=$iKf;e+*_iwH=iui2i-HX_NOqz#+u4@E& z*a`t_y&_n7-NZoqhR7)A@OmoT)?)8nN5o6(r-xq}vGED*3<8JbCIdmnJ;Z3`w)5v< zDh`ZMCH#*CslGeY4c)F+_$DU~jjHqN#SGpDH2Fem>IP^OT2#5Q=Te!)jV<_06+iST zv<&}zjyJ5&Ed34m`H4=MF5V#C@bh3(K!M?`d+{kA>CZd)1sAEAd=aCHk7%W2hPJ5u zay$_(Z&h`6L-WH_j-C@|i-7eEBa@Y7LKb^Y!x3&SC7wwu6D)fHnnd;J{1~iQR=Bp! zffufm64QhBL}Ek=%@S@_!6Y_}RhEu|axr*VpN6O1&T=0(`UvQo6`Klf(CI!w0=o^F zHP2jjK)`|>Z%L_LJV9*7CA$V*``Y5!Jx1LQLrqWhxDoPz+|mG9>XMaffYnYme(p(} z50Zg;!jfJq%CqV2P-~(WzYOSa=oXgaO%Ht_QMRMA<5yLj;VGK`QFIj!O@3Wix=WA_ z0R@p}(kT)usVLIT02!mZy9S6TFh-|8r5W9=vXMGEhoehkOa^@W{)P9w=iYOk^8}a_ z89v&e)v7#nQg0VTS~IWze57SKk^(rj6_YuT&+67fGUC>BF#UCThtj1?$V=e-p&Ob$ z@LP`|UO>L^HFD4Q>CZG?u)>MT;<~4#qDS`8`mM#jrHcG*xG>)xG`#(L#TOSH-;l4> zH$QMoaM0-jM^As=;S#MwWu#g{milT@xHRKMs6_T&EQj)bX$*9EKBvvyh=G(}tJ|K# zDE%E1@C0F=5o{B~o33Xi?8S=#-X9gi^sSLU>JGDD&|sIpPf|T+b)}!*N(C@=nQa?3 z2?Bt?1531_y^Q=+vvK-EKbHtW%h~$%N(YKA^x*r1eyy4erkO49Z>uPB{NieG zOzSg9je1m)8TDQ2H!0kakF3~7xwAIgN?zAbH|IG(1!76`Cz$igP2h39#Fc+Ou-c#= zUVNUkz;!brg_Y{u0Bl^|h^^OfsdW~B5?@G-qd9lLJ+DioPLOzjl_T_dT6RyYr0)<7 z8Tm3$gi{AU&m;k{|D!naZMu1x&DX%&U}`w%RO6jf3XjDdxAn^JW$YZ@-Hw0G`rY}0 zzX^hNOYir^i0qaZmk(Hg79MtZxxSZHdRwRcvck=1iG9O!B#q4?EZCT($>5crZ*06r zx+ZuR{bk!n>?`i--!rC{qEF%_-LdQi1JUSS5|ld8|5~ zc!GvW#_PuU?{r`J*Cjib8)nWD&ZJ+z-iXSGl)D{NY#$JOg75uenPe$vDTDj~uXHJ{ zMBawGH&^CX(kRxkKOlG8x+Tot?I*zLBRsjt_R0f#@~CKx57 zWb^BEgSwX<6bE9E@~a?qW$Qt=bxb9ZJZVr3yxbQZeYlASTQt9S7C*x4$%pg=glB%I zf6bm4vQXS|IU7tO{T1dW)@>PdG*6xdn-b^_(PPSXVV%EgeKNJ!naDG(NCq(dvvyLK zv$3ZQ13?*3^AQ)F_spCN$7C<)OTL*VK6^EdWtdyS(TdR=L7E@F3JN!n;k8;KmK6Uo z!j<~%0BmwudLk$N(O=$qUVy7iaWM(9lD=KSzBVqq$HEs-hJLK){fy1b0ujVV!A{MY z{jN0hUuGbQ?&{tHw^FhUSw)sH5WA5_ybZs-BdktB+q(cUT;`6po{iGAAD;Lb zPc+QF=jprNM~iag^>JvhIOi)FV>krLdz1eClbN1Wwc{6iRGyq*y*K>rfs#c5f9i|BAlJfaym|PX~ zGG|4Kb05JCz<6V=MpfyhC}}K91*j+yhAnH(D`YrB*bVSzM>{UAc^Vnf-M~)9bUB%V z_g9fuxxMhenAgWf(sftKO_@V@^0Q65B=T+nv!gtYp;486D?q%2Jnh^F&J!&5in58} z4ODL2%=OmE(W+0zp7WW^qaYk~F5|L&`|rW($^6lypmyA5ESGJVegi?#!>cyJXS+?> zlv?!HQ&=ef#iC)*KoYK3fs3OvW?itQC~vk^Vz)8_pyhP2IDRA)_l2zov~gx!hN#Rj znj8$1v=S7PL9@@&?4PTOTwE1AAS4I+PUUucPOm6mMz6KGR%82E|NpiV9Joa*T5S&TM-Y{jQsgY zgk2>_%255%Zh6B-r@D zKVQ0D=s;Az z+zV-bsRe%VFa^2y>D?#6b`=tP_;*}!eMMCH2fLm7Bbmlh^`F?t$k=ZEm@ATS7iyyp zI!sz^b6XCC_c_%oTQcN`SooRDp59D|?$lu2MpYFzf1lVY%f-!{8DU-HKK1qa9uFDw zvWIJYrE|J;3+j60YS{P-Aeb3lmWtdeS>yH8I)}3%T%;Uw9h^@!b#qO$NG*;j9fNRr zELMIrl{6=tSJ5x9#H;N5*cv7a$iu2NU}XUH7qBjgBcE@h5`ZXgPJ{;V7i1U{K()U8 z`C8S=ch%>;ov&~FA;Rwuu(R0MU$^Y?vWjuU$x7d%#XDW_zFP^84E_31VPd@juw+#1 zsCwpSJ?Rb115-T1BS~qIgM$GQihi7%Ri=X>ptz;`A^_sqoWMQ`ZOOpGV0Fiy-q1cc z$?0OG`;Ck}-y+4W4#eVG@Rd=Zqk@1hQq)9OdI zjK~Dt95*Ytowg$w4JT+VlCH_*)@8kB`Vwp}`w717XRmIJsrVr3Rq+L|S9mNHhSS{N z?)$@Z=M)rS~5Kme6NRwUfKDmbAP*lK=fCDsxCj(&bp-?}Izym6Puf#0$Ea)t80A}MB zUE&f-6fb7|b?31`j+-q+T3c3pmSsiU0$z^-StPrCRwJY{ND*?QMCIHJgnA<11virx zJ?lrD5_BJ$NN<82#B3w+6=^d=iYu>j?9pJktNv@9h6yVRf+D1}TW9tBLx)Xb>CMHW zXX}J>mjanwU4#D~%JkFB+m+i8To5n3=Tw!-dfV+!t>oSLr}T?d@v_f`%~bzpY4 
z^g-$MiNvycIJc*ftvi_lAQkXq-oY2YHPC;#{q@;Nd@us-g4KNuB7M1=^vXf;fzX@N zgQz3QmD}YFeS8~8v?sTfDlkliFVWvXR-jk?CO}itO7<&_gGachXBlQ7Ut$Kaf>@aI zJIBO?G&0Q;vG;3A*H2Lb4;^knL)6y6y^)g*5mU{1-c6(I{F6DE%UL^FPZeo&|X zt{Qag&ZGnq|BOJs6uXP;@yz`GY(93)hSe|Cne{`dRemTwPfHC_OUQP#akLE^8-=bUFIPGjzL?6LLr_PYhWL?S{HMfo2l85Xh(~Gv3p2QlC9Yix zi=YNaNUB4RqMgex&Ho5>>X`!X@jn*eu7P|{UwY5#4C1@_v{}3mHW43Nr*mP>SN2tj z`#0jE+pY`xbU`8Op7JGrI4H1XA28|OCFq|01Kl@?Ug>Z~}Na?D{XU0Z7xZ>}ftuz@U; zwIw+M&antahDvcTF5+}6Jzj05Kk+Csa^{wouf*Rao!1tt!Q!j&+=xn}S#L1o$-g;a z1vduix?wuu$Zfhlth45m4)28rS`FjA9Fc}bL_Opu&5?UG17wF6%vJ@YDc2VJVODoF zL4(jyownV35zi_S##**l=;8rW6<;N1CeukwCF5zOxCgNE^fSmpc(M3Th4R zwT-*Y{I^r(Vd@X&oa_i+aI0D;q*JtPxpljM(l6p}J0NpV1}BsRvg~7RX-Pym+X)uh)E1dTz4>!<4h- zRHL2RfU5|wOJrHcyd&jBiJ41IcB_O=?(4SKpJXs`#5tzAnlwD(@pwI=u>zHmFiegA zVQcP>9PG@7j=Lb=ptd^OIV7uc_=9=28-LaPVr{5I=p9S4H>vZ|$d!v+i% zX$a_K)t8d zN4N)Y3(VfeoQbpwKuU;~@OIMxi|5emyn(D3n8)Xh%#F`9Di&e5MVTkj2eG~KkqoDK za{TP_$!*?40mEBU*RYf6KPwB#VPedM&D$i^F0Z?&Fx{-sQP|ex??rfxR0|i7e5?G0 zJN@z!N&G|GSc#P`-V~cda3CK#$a(VpM6CHpyW61tF0HJcWyNqIvoDw6VYtK*JlzD| zRI@-DS;^O^3aBHhi>-S)g;3L;U>4#nwNxBdv)-J~ZohvRNiT(ed}__{l&_w-+wxd> z!ylNZ*0(w$+@_e+APE>60KTrNvh#HMWNk$X5S2+i?u2G39!<#k{(W(6S?1idkiC)s ztZPd3wW)rj!@gyePLbcbLpF%#@JJ|DlAyE4Smg$yKO|eL7jzRz;_Ih5**~HMyhheq zF414c+w9CJXrBxyzcd9g?GmQ^1VvxwITbQs_xIpD_XP5U+DWly?LFC=U%w1~J^f|% z>y6(qu=+_>$tydPpcz@1_W~jQq=|TGEAM*VrD#56K_&9nE8Y1`CzeVTS31loyCbsE zmIV#*yr_|czQ{k)D=}IgL2N-vfCEp8&S4zi|1W2R0k~~>KX<&Oc^=X4H)Ejw(z?x8$bwGsxP{Vh`)!3 z<3KPRWv9S04!~mq2h~g3>A~YgE0;VorXJ^o-bwPBxBvN)ul)-IuFs`QSS{n_{T5`% z4r2}dm&!OM_9yyi3SK|WI`U`TvpSxM83m2PV_s>P_`QB2WeVf?xC7o}Afpmr*5@2n zFzd*jxz5*&(u|SlXHnPxIufsB?d4`(Nm+JPV`O}K{ixh6*zw~N3nY~%R(AHex0a|9 z`dhg1FW+^ev8JP@lM{X8-|ve7l#S&~+SK;*lGOE7tqL&en@MpzTC`?vAsx-i={I<3j>0Ku;QR=@IhWI8KI$Qy$bNq7f)=!(barV}{}REaPvn+K&;E>0)++m)V-PSmzH5<6=M5puHzeII>e}}q$u2etp zhhtdZI3<~0fc210$J8R_Cj9b|;3sCOY7=Tho;mVStcSC^?UfzS2gPcbo~+UNNx@Gq zV>DBB#vAjYWqU@*t;!U1cb&dau4;^ zEKXywuOc=*HJZ)$6Q`7(Gc8V2I;Q|Fu$LVyBk)m}V8S-UeVq65Adb)#yokY(}9R@yw zaA9N!UU>D7d_#AhzI(K*w(P6(a^JaKt2Y4D9h)b@6AY z$FzmDeo-ln@}ht^0P(I*jx#6bXYD9|zIOuEPI?vTo<~!7{k3;&vOw1CNHtzRPq$P>=DEI*m#_fB_3rhZr>~K$k#b<4nFHhCNZ$(;m-c5A8_anOR z$5Bkv_y6=QM+!5qd`%eZF{3oUq=d)&`qi@6L8i4diV~D^r19wGxLTVk zSfxW5%rnFME`1-woOoHitQ?!cF;hiW%ezc=@X>Ki#&fCFKme@VQ$-lxo==F@3)Q~t8J>=Hq&27Gx8(eFK6Y4O;w)Z5Xhgo68N|d?pL=D(T zAE!xJOK1y4;jU$M&v0U}Qu3DTaW`*;9$U+De=bP#-NSfj z)*DZPi4O8<yE>!9Q%j;xydu`?zsvEabzJH_#V{IdA8`gS^|C z+8ByECU%ZLhKQW3SnKQ!i>^i5J^>YFLjgSwUv* zOlC@3SVOgVh+<5U7gOI}nYJI6w z99O>0M0xgH)TVN%NEv+1^6Bg)Wt;%?NAP7BwPTC?jT~W}!%=7M zsl+8}CDw?P`Y+e${`6jqM{dTs$EVyydk_{r=jEHo&jLaZ*2x}RdFt%9%82TmhCqrc zoeLV~x^!AdS`@Q?ICKZ~kPj?I2r6k zYQqF79bCNu2TgM5?wIaDDysTP(6;U;&DRvZYS7e z&QUqLbjQ9QVfOX*WS#h}W5ybwPS;Rm`$uf4HCJ0@*@Q&cHRZ0w(xW%7R8CzW5j_w` zu62I4H8o?3OCLHX~YPO!};v+i}-IO*JUvSJ;VxQrCGBQ9^)qpa zdDQVGP?k8!^hGC;{YvDy(KwI@P+xSNhfYTXo#zp51L=U}$;(s{q|Byq zEci5#WvbtejH4>sd%})jvoGUqjp~nF1xa!)Jk!fv27+=BRgDn4d8W(RE;X zk7J@!tf6iY-{=ESWkG$7O!9sC!ns2;%s9m55@$QEbC<2M$OaAKmX4m6uH*rz_m|9) z!AHv77cIMIzY;jKSkqyWW38+LPakzF56sU zk8sF_mp)6RUkQz)RC_NrkB}I*Pf%WM7_uEASq8Ro$L{+Htw(-)Uuoo?f~;7R0Lpl6?O<;lHTEou*31 z;#D2r?))61{S)Wi#KP%E#Sm6F@e_g3ALGli@Q~R^iOJ0d8lvDsQypzr$_5Vge+D7J;S(q9{4|C`6|F;mMirl%(_-gM&(+P5wv;j-1LVcu%rS z!W*S_*eAS662KUiL;d`b%c~jNJ3W9pn5R}m)l_9#nqR~W<>^(E>eOO%wl9z}Luyr2 zl^ZV)xeNiF`t4N)5p12sxBM)RBJu^R+oRVQa|+6R=6qoVbta30kn`P& zAs2y1!JZ>ZVfZWZe6=rhyf`1KL$&|p4G%p>vdH7cw8xoy)SgOe056>B=2c&Y0Wn*< zb%$E)TTYP*Ej~8R2jL-c!CUGfV+~>GAsmNUOFN@rM}uSIB2eGv~{jIqmWKYERk!I(NGr21x!}YLJ>qt0q6qgG>GGGWd#`^%fD*%uSxnj*P}1_`k1!-; 
zawT46_p5^bQh^+_d*#a%mt%13LZJ@x&b>UGyES6phnPE7bvuTflA(punmIgJK= zv#jtFQWukWnwDUl57TKdVpx5GocIK-ZgIV{%9CLUz+hx}m7q^=*K+cV+T}~#Up}X? zFD`QxBVyh7@N^BL@^Nbg8%bU2EHctX zTwAtgUS*qO;D(WMkw<(2B&we~+A};cmPX!@AbCxEd2yScMkbVa_0O{p=AwA-&YTd* z)qQj~&Z44yg_%6Wr{05Cqu$7C#=m!m*`XJx9O!K{IO^nUaBogHEFoPrCip=c{hCm- z`qGEPCwG76TVBiMy7eFC@n)!}?Z+feY^MS!nm_lkYLbdSZLnJZ5I|-*SKY{VJGb)}#5hx2QAOf%us1)*PN$-@|MZF^U1?ofO&;ACm_&oZo%) z#eNT)nOe;3#29WL8ptnb1j_T7ZvJkdTyK^Zl+_bB`S+-B8$6C>k4SU$5T9E!+s8(K zUpXo^E@vY`qjBALZvr507*DYHT*T!2F~~8?G7G^Hr0BZRG5lg(&rnAiNqfdJcB+1H zqtjFUS8=?Op1E|&zu?EuFb^n~hJMTF-r7JWberTv+{~?8?x5^J0|^kAS4H2kVD&Ck znB)Fpgr{#f3l8)QV!pY)xLHzP6_2l~DhPVt0R!CLS$xb^hX-$)_sfmbiDwU?6G-J_ z7JPhnRkr-waE{Uh#hTM1izewqF>mc{P2}+=bwW0&vVO?OhsWbp`JEgx!v==6B=}QT z(!La7{o@YhVCVmL7|b#Pu;FpHVc>AXN_VJbt12(thj|3N>OIPXQq7OK`a34s%6c?3 z6vgv;N`c*bQbppfbf!)eX&(YZCCx;cD&0H0F0XvJ$nv;4Rd0_wOPdE9*(Z%9u}%-r zVu}}ze^!+_zWR{4fURr{yUS0($%?K`(rlHhGy)EcU8fmyFPf>%t9@Z81my+#C5A+`ZkJ{a+q=0 z^gg6rb;HnZ?z16YetFKE0ZKzh4esCr%qR@2%HP|4`SW1_ZKTT$Mt9X?0TtWVfUyfx zV;+;1P!s429R`1`_Sa;}El_i*aC7uGOWXO=7tb#~k4Lhn9>uOufhGu;6CLMBECu+( zc7sA1wL73yP(j z{4w-kSAbJ$UKYwGWn$!c7;4Q6k7j?tz@M#AK1zR@Bb7roOSweJrkTX}&`GDBKv1yc zgEd!QGoZd-^iSJ3j;1$LO}P#Oxm)Mtz0})X#z_5Mq<(Ae)IU%xca%x+d!8`Hvu+0E zI0{33>vPi~ow)}SfO32k@mpYj6^t+Io3B&;(bv{ib-(o96a_jLS`Im>5i{3fv&+^k zO_c}enAlBS=lI8mKYZ#sTo$Sj&s1VOet~*h`MoN)%1&`ZPEf7uMG2+jCyY|-g+56RKFTd?c z4(H$Y?7zPfAD;K-1zbeWjJHq0r}566_D7B~8ny5mZ9;7}>ye@9-#HG>gm2Fxcw;;{`Sp3p%XdC} z9u&&0JG^_TtrhnaLO zRqa9+qw+|9t0|Fv=$o`z{s4x~jdHz+O}R|=tPQ8NN*H|NqkifW$Mp6&!RkO{028Gu zO>P5B6ME2ct2my(6p!yqElA_a&UFm6w+guLCsO79fK0-%d0xj%xq!=1cXey%a>l|# zOKNQC+!GcTI1Y;Lr{UF7IHdqtufttZ-9r7IZZ*s7Z`5-Y7zkK=cN4)T5&+D={-Dvk z@gR2$Q*m=1bT%XWj_dqv!`f;1W%seNeV3d(k<+WY0~=602L%K&-*Q}6`{&>M1}3`QHDZ>>$Z`?0M5~%!qM8hQzjE| zciz-1!^7DBB6-q0Jr#)=@{J@$a88|*Not!xXN}fx z$=l7l+)~^cfgBhZsr;)V?uFRUf9IKUY?13;RbT9k!!saO)pYc8lk*2I?+Jodlq=%1P#z=y;SSL*CGJXDJ;?{_*rB7M6?#+%_i+LFfL79f zI!T3REMCpHn%V+FFKoo#@GmRNAm7vm{HtF4RhZ0}GMURnR+Kb@a`I^yD{G4)n6lu^ zXmoc>%P}mH+RTPQ8}N({L<`4_?XS}E6=TU3(J>j(Ms>bj_j2jN#}_LNa%Q_C{K{!W z_IkB*4L<4a`SrOtE*4(}b+ij5@W46kYGpavCX;p|wv=APC)iNsyRla9vxSLWu`Um~ z#k+JmIkYCXDtr86KMmfa_9P56;=quWY!F7J=_;jGO0XkPnx33w$@UCW3apGfL?mpu;Y}vubGmYYxpm802i#CpLtt|6Dss|{| zgv1=%AymHd4wwaQ!YWblTRlQ8F)PG+NULK?Nrc=3!Nm3_Et)=x*%?|zt&Q=2`S|g{ zl?R2eUB}hO42W^@Z>aRLSi!K;ZpWAk)bOM``!e$vtVgY+wcn;ZBd2g^lS03 zr8Li}#RdTx*3MKW)#{w<|Co%bp#_~QOxkJzo9PJ2s@l(_;`0mhb#&I-xNgKP<>fLy z8(-LDKGVUw$}I1`O>RIV#~jsscof7^pC)~!vPj#sWKv!DFJJH{^iyK5?|lLZ?s=1; zYy&^3js}1<`uywsA$=!R3k!SAoPWEGuRkInx1HVj;qW7gI<1BszTa?24*OHyoSy5` zYM1Bkx_u^a07d^27U_-%yi4`kZiXvn9=Y>YdVn%S=rGm5S1$h1UtA#~@Ak?)T{)!q zo)sn2FE8GzF9A^8cb6Yv$o>W${a|3OQ4AI5&ga443|^~h(vK??zwN=DYPrrR-XfSR z<*Y!e%-{V5ElU0vp5%R!eEdi1uA8y}jFoZBS9wkr8!pu0%CuV@v6 zQ*1+*1qX%xv*?obpernDB<`)9OZlss``wMn7G~{ayQPltw<+wnt1iA8^BMp}#l*x& z{LxR)%fsoJuyj|9EzNM;slbs*o<&{L~P?h5GmFgu`i*R;cf;yA{R$EN8ngHR!#S^*xAjUv;Kl%S&TTYEEK z#~G;-SiDMp((b^2&4@Z4O!RGD<{3+J`tlqDrwJpC$>z zgzDXS3Mt6?KYKS;8=RZ0Om{`AtLhbS9T=wMBB++%&I_2`T(I`x)stu2$TH`u=dT({ zI`9t)+HerePP;#EKzKwO>ay9Tdr5Eq1t=C?0!4W74osCbh?SM7vE0e2z&nEAJO2s~ z+7cMre|hs%JwGuo_MoZ+3A(EL-8FjkMMh6XJXjEN*?97Ml&C>x>2?0`iC<9-O<|Q? 
zX`6$X{IEj&J35TOjs?OxC?(K>+ymot$~}Vc5iX>mxJ67O$dRNjJqqA#Do2on_zFB_ z<{;P9XykVzget2aN80Au6z$#Wz@@J>x-iR<+Eebuva&>4^2K>6EPP!c<3%74Kg(1R z2fIIdQ-qc=Aw?Go_gK-e@v^tCmX)9qxA+H~P&LrxsxC9MrN094oysl6E7<1i%y( z93dH<`c8;$6FWFM3gx2H^xo$>v@|T|MFc1z{TT2nH~%niBNWFl3a0)rHOKI@+zL+T{#k`8!(`F*EJ;?clYai9t| zRui`O!5K=n==qa|UyG4HGd#*s*!igUx?y8sg-I$E3KRRb9*QCs90?4RDQZ%y8xJ~M zo&8g+FzK9cT|O6?P+44Zze={Cp~v5u#f8UMXlg@uE8$L7 zM*}HyO)Vt8;yCZZQ@0C{rI_WE*0$1MQHk|gF#*L=b>xbxG2W~|USZxhsZDOEqJJ+) zV63{m2je*U%w56*?JVtC{|#l=SFCJ9HE3EHTExMOlMV>PS|-LKGIE(J_*irXGj1Jk zqdhYvTf?#8;K2QPDx+soh0+9Gzb z!l8j7-Z}{-79W#a><(4~1Ye2CRf@kom8)5zRkq7&eYn*k+~6m8GZm>ijZsVdf}$Y0 zp738FZpxVm`q8CZ&RkW{{Hvi@S4$c83Ni}(PAVUOx98f#j*xnv5i8(|`Z5Fa(4Ku{ zN5S&ZyK~!j2@PC&XPe2u*tXH>EUje82kZPlq>8K3Mx>&FUNd+^4pJeohhxi%eW`~d zq;(~N`zVh!vBv}UJCyG&C4U}q%@TtbIb3hxVPb$*;wQ86hgv26ub+(bPP4kuy-y7_ zI*he1$SZSpcruL)@3s({TXb*IGk!l5h-E!d@VM{paAX7EbX{pmg+2&Zp2FY8xys+j zRO%MQL{_y~>A=gCq-&xzKe4I;38_ZWi&EAdTO6-cPv6@0C#Gvu-usk{*#4;ZbfD3o z8X7(l87+9|r8Pb`n)axjOJSQxGBP3`YLtZm%%kc^qgEO$< zKs%+lSgE@=Qq;}`!OrK)c2`&y9+Yq+NsE5|&?#UJTese49GEIDw_wuE7CR{4uclUF&=5 zYIn>!{6t35xDsDv3Gpk1P&j|SaD4ew)Wb^7vFM|N;G~=)E7i)hW$?p$a_t=GD&{oC z!Y&2onRm{M3$r=2fkTSc9RU<08P`1$Y?8NacP(kgnAWKW|IhpwSqpfcG9R0ql_j%h z-Nel{cJGzzWZnxJzA^t{PNtRFB#IPXKGWPVhA{l7hpAUbFjjga$06-xL1iNDEz1kP zg9Go<)2<|TT{GWT8i95l{~CWWi+^TW5u?$b1*d>JG60n@^ZK0#gf7CdW4I^rlJ~lUCCtcf3i})waDPt$_?o#Rq>}`WN z2SX=kZdit3kVlecO4gEOg{ahl;t>tUoS|7>>W|A@t&DjDl*JS8KYmTvs=M*-0fpi9 z-f1n!zYTa0{PoP+0vH`aPs1qrPgZNZ1>y4dxQrV`2JYPZJMh}rIO^hH(ee7X}gCy|gu_~zJfQb3|3Xw~!*ixq|3Pa9x%0Yn4QuguJn2r=5Aq#ynDpRJP=uoZiFm_#V%=L~3;VGE@5b_n%0Igk>vRKcE&e=ukjs%LF~{-(Vq$%q zjMf<&viHNK8!fhkj*E?n;aIi}4FeaV*%Vn-!S`vevm7~`tvsx`sQoA^$GFI-wHf)I zmFSEqSWkEypyLe#T!B%QjO7)u7xUTrUP}RP-9Q;p8~5TXUuMSF0-ZheDvRzI<(8)> z5y#levi6LVb4bR-gjW}>5&>#&tn9!U>?H7*g@zm1hZd6sPi?KVytgUhdP-pT$~nBCV{R0VtO^I%kGqiBonOs&*= z&OH-p{NHc&JO;awDk}gzF1xG-;oqwA9e=O|%A@AjqAab^c;&gh!o&Bc1$EfXG#6L* z*@=}%95!smcB0BU7gzczx!*MyL$4qnU0sIUpJaKwBwjWAK{nQ0?T37($&;qdSNFk! 
zA=9BsRHfS+dO5yA4GWpWg-1qDHNO?#o5NhYEf)rTGvUI`ZXu|{vx2+r)0<(Y&rqTm zf2n?&nhn34vbq0WtO#*(UoE2=s-BNYhe;AD35>{Ik0qHA{+$WHguI+9@HcKzdabS7a;eVU(E{*s$RNPq$PYBR|pdaJx5h z$R)o*p*KNGUrwGx@n*qbd$ns_6tgO;C=(#H_5aUyKU4f7X+Va-)Kg>tt0&{pE-CcF zU8ahX+VU{KWPkc!*Wb{pFgEdD`9mTOB2w&;$iFsl-q+RT=Uz+S&A-i2kRgl({T{t} z6y$0RuU$XS-^cMdA$;SQ^<=!O3LVl|FB}AOAH{6{)c+=;eeN<#0?LXQE-HWsAakZeB>_YvT1(dlB@PIWeSRyGo6M+hc)A93|!=znT;j zVl?^QAn;_kNKqDUj)BOwDyK_H?#{Mwa86Q^&l1v$-PXsL&;`AE&^Qc!_Vjx0ou1c7WWubC5iAWz@b zgyl>n_=?mN5~;SyS=`CQudktl-Ou~iUx}Mhy(+Kb8C(5s8GQ2a}=wY`Lpbwjk;IP`n)L-kj>sD;yYq-5`qq}s}Xc$9pMZi_@bLZDe zBgZbVRS1n*ig60)CIO#zx#(TygyPbnyj@#bF`0XCiSLjJdiOR@whF5v{v&*X$S`a= z`NZYwFhI-;vC6Y2+cZ0~3z9BM!~y^cH76#>t@U;?bip1raRUZ3MiEDRaJ#xCHjJDS zvBoB56qtc}(h2Qi@k5xer1*SXJjn)X<|BS%`t#|1F(G#9edXGf= zMk~9G?G_QonV7)p!5V^6wBs`cwUxwOkIrMuyFQKyR443>YpqGua=k~DacX8WTknHD zDwQa8aC~)mZCtp(b^32PS({5=**UcQcRoL8L@}2OilVz*$@j;V4NDniBhJ3{{loy=8{46S!sePhygnV`k*G zyoI}8dOYs#B(QBqDPiqL%E@K;5=IMrJNho-ff?MUDc%1*%Ld{SKWrOiR7)~%;Qp;&XJnEWkQA+2sWZ*h54p}9d9Ki|M9#6G3fz|4O2^RJ$Mf)FQfiZz$J z$B-ysp%LuU5yy!H~JJo17vPE z*UChraC{0e-D>rC?q_!|6>){u>3(wbZMn{_q+^l(3W$l{1dwnS)a0bx>h^=4D$pAY z0pMI`%kmqc`I69I4Oia4E>1fXmjxjkPLnOo&Y?9a*~{Ln3L`hzrapxUbMd_XDU^6p z8||DCW^4tZ;y`zoZbb3xyqWSx<=Can=eW65Hv3-t>E<$9w7TGDUkEje2HI1z_Z*&N z5SM`|(g_=$!&~kWgMI^fyOc}&NS~k5uP9%c&BWujuMQ-7Txk)dGcNY@aWT+?1plhg zBBzMnvHaBpXv-okIr&ZhNxEI5N|ArO^qKw}?4O#xpE=jDcO!A=MubC1hj16q_rCY{-TN=U z!#;bjv-VtbjydKS4vr^_Uv63iN=o;sRD8{zV3iacU3`y$3Aqs-EO3+Js&KS-4D>|j z-o$Pgu7xcVtYkKHbz7c=7~!>e`nnI_kL$MM2o(Gs-^+sSsc-9MaOeIEF2K%}WY96szl;W=WC_Yp3=@ zd)N2mtAk7L<(j%&R1S$@-rnf^Np9=`F%$U04$}4%L25&-oQUZcbM!V$zD}s^o7RN1 zZYwBl+?~<&H5yHi=th3%irSXp_e#W*8d;-yz92MZ4oE|lxJ?*dDM$;euG!JN ztiEx`kUFB;d~$=$nt?ZFy;Mjp%-8R>5%JYJgDjoroN&qgAFg3{D#gY!;95^VSv|p4lvgA@nD43)}&X(uUTJAbXpn_+(t5xJ9 zn0(UQsG*KKbOXz0D~$1xR-up;@m_K{k9?UPVY-``xWI(XYXB#|TH%$;iqWs=~t%Wkx)_W^F$ z(Yk!t@ohwyOBc`5M7g}|2>r?zwf;%ue;Ofao35(Qk^Gp)ComYSjtakiN)hD?sT~Ff zH_F{33il=g=w94HMTAd+w-v?8Q+o+{I1aUwQIbo8O8-nru6aOeyQAFj#mG}4J>Oj| z1RXCD=?V9H8&p_+aSImLZ5@(!+YiFVTeTRe*A?C6#txmGuNf8w*PYd0c^yfY3YYVi z^&A)nWjn+jY#Q0ShqF<<6cy}iWxA*i*x8DUS{-nEOv{;gyzKF0VE-+?(VMvI_NO^U zOri+8gJxCo9iGF0)iC7|ff<_32t1m&4NfD6J#&J)@+T3Z@FxzYXt<7Yyd-F?!@^oM zS*Cj-+eo83__EPza< z^MRhp#-X%)%!zOtS4zQ%H@sTmEgs(hVXY{7V9qN#qsLinG!%ns!G9WCoyDCG7TSFj z!i}z4H(FN{>k!V@CFP`2eso}9-%KNrWd_=IHqtrb9klHWy&RKmJfV!F-xPM}q2{Fj zE2z(0C-$}1^xcI!1-usRx=vM&w3U*y2@k`(c;ol{DIAl^$@}Gj%W?)=O@OP;BF#JQZBq#dZ2fX>9;seK}} zHk!1v79RAvr{_L9actWnLp}(_nlVY$nSIi|G5cDWd+#+9%{#+`nyk$s?5vGF zC&dG%!J$tpU_ftsJhOMS+dreBE~ z)7!4ejf80Pi_1cK_Bl;hlHvAlE>q35$L-+}uax6EKO1v(ME!7*){-H!svPPWIUMQL z#ugH~mCcRm)zQx|i>R<|F-&J@^0DrFy=YbuqNSuqcax3sgbdxC&kQp3tobC7m#$Zc z9KRk?lQKSNmC6aa(Ng?L0SLL(rze3cl8-DZPumJH&|qK8S1?Pv*uoIrqt^!h7W0U& zPQ!g%->&1=#IpKRG&q+OZl3b|YtM>G4GzCsD^C4N0)^5ZOr2fowRr1euK)*8O1 zVj)|h6AdDwf)XXp>faLWy}81eF1nu`5WH`&4Xez4)DpzQee<+(!H4p1_%hjW&V7#0 zBG)~#-s$nOIyCf@#{2rlVnuz~oa`TFf^*u?1w19Ui48w1}JWCMQk4OisP7+Od< z<#BVQc_{J2O6r!2pQOxY;L*B&OYgq}*lLNvMBjD;bZDZ{MYmEr{Q&xUN( zZBpLS30nQIl>4(1@+}ET;|}Te^h*kl?^dJglZ$Tskg*E4LI2l&RQ-BS{4ZX-=b7bU z8;_Ovi?%wuLxIov99_`PSj;9k+(>2LOQ-Q&-9NmDUfd?H#@C7R>beY|U9GJ4%p&Z422;c4l&NzhT#bp7Zyv=}@OACE#mhMH!_Oa~ z==YSd;{s(t&GZJoHj02KRZA=!JLf_tYUZBgep@wM{pY5uhN`r2Uw=aT;G0|`{t(f2 zYi6>)Tj0{lMP%078&XajFpktUrQp6I5r}kf4?$w&m~Y{rN*gE?8$C7u4EIxvJMQ1S zHQ~CMcfsVEvp^l=QWY19c-B3FUI1*w<==g8+YX~oTE7EGs(A8p2X{O*2H*}jT)F_+ge9f=&9&*Js3V@3Y@ z_ioi9<}k2~Ag3L6_{CF|(NC{tR|X%ZbDG*&zv`%3(i|x>ypm&4f%kGD1%~&ZN z7j3pT(`sl|zv%JEoYW_WTtuZ#>;10xTkavBWnZS<=R@N?jA9P{m(!{>jmbeWyU7rUyLP;ZF}ajkI$y42%)P6qT6-mhD&V 
z`9Ypa-jsJFd3zJiK=kQA=}d%1?$=wcWJ*VcgWKC%y@PeaLc&&$@0xR#j{P^X|mluM9p3N`^UZ-Vo12BP!vpJ& z-ba747soV46|4xv8TOJ665r7^d|q6*Z*ga`Pl3qoNOmKz6y4=u0f+VQo?mHHhpk0| zU7|vYkZ^LK({bD_q+DiyQ{*PN-nBBjk4}XXuHxAUr z;*L8^grL0Ioge}qS2A)vva((%?tMV_pRFO6snwt4#gNjm-wgYOAj3_?O@EwGG!^Y zP?+E-mW}eX^H#ac$!jUvk|s#A%TPxhL*jSnHp)h&$NY6vI`WU!)s~>7YU#eNtl6C7 zSdEfIs7l#F;4hD88@@-BQ+pLD0S38R(fk3L!p;O&K+~*jm%uv5r|^P`CKMaXFv#n; zmvv1Zw-=qtJFoCf7MO52@;;|^q1}PQ-240>OO=V~I78kMiZ9q(qjWd;ZxurDWcS&A z*B3nJx51OVTt#^hA@p2R?Dz|=JtzNapGXN`?j59YYQ9`wXff~x;k}({z3&1(>34`JAzZ`= zkkwVy5f|AXTBOqwPe;sX$EPviCz16Rl6K@T#TY&ZoGdY;|9sRf-^AVE#8ziV>|ppK z9l}~Q`x}>Iv5|6xU}C!(@KcUXV#{YZ(w)nztn7J=?tKKX{v-sOi4;sr38#r$(bmh( ztMsNEc#KXJDB7Qh?c|TT%HK{hH;{!Hc*N%@hQ*0Q6IoWZI)`>gy|)RI{JKad1mX}S}vV%kMVHjDUQ2dolVHY=!iRb=S}iUg=a-hyJ_ky zgd+PLXUZ}KlrFEkH3c( z4dR#N2PSTzX9$bF=gH0!G?dPLb2&EIgkgkqX7~Q@w8aBw9rM9FE%bOnW!!bJR$((ECYRVgNGAo# zQ*~Fpb}PhjK)9vup??q-^Xr$5`|qFM3g3*g(HFDu|3F}!WTQj!;D_0qe?R&erwz^) z70?+7TS}9Q`|BC&+p%#DMiAUIbn0tAc^Q=&{MPU+;%p`p<%RULR$rl+hn&}~wE|M$ zZ*R)MDID4Q@_H)MdG*i~opSD>0e+(?^kJ`d3;#tmy~}U57v$M)#LMZ2>;|n2eL#At zJ%~$WZf19zu_L9BTGG?=9khLWdTnH!`)j&1{ohcanl66J0Z+ic#>dAu?fP@O z6Vv*~YBQrdL3(duL&$2Y0%^O|y2}n6`R_n7SNQ7d!s6n0w__ljs2_KpBz0$lRxu{w z=d{|sRQQ_w^w-4a(JCUAo&rr~0nWCHhbYtN9Bpwlc^cv z^)~81M+)hE+=wde_fqF%?~MPTaqlbDaxdANNFdgeRs?tc@hIr2nd&3=kq-wgdVfMXT|MQa@}v?0+r?KA++^16-xQa#|f9~UQLy#vMu5( zQkbUXPdvRfzQ0cA*bxBov+f7`BcuVrvf5gD6&f3#S3L#(rhX(OkKv~~%~XlqLyENx z%I{og)nhUG8@4y@aE0$onLeK$D@618`51;K?POpm^NVAax8yuVO0!4{+jT2Bxe?%oKu1~- zLV~Vb2J|sQ;<+(@RQ;a`|8Y_fzsH|u5)b)^;?$Ew&gu}_xnJNaeq>0TW1N!s!Lf+8 zOjAnnedF_0Z!J{WIH8+Ju8>^ZKoSld*JsC99f68*>BbiXvf(dgPKHK{Mjc`iN^~tK zlmVeQqt&(&4b+t)k@W%_8#%*Epn#;`cV-xzpJSis(XnoV)Izij`__BnTF2(RoLydF zU-;}~Ti0e0U^(=nt3&wk`!aaU%f$Rnwyo4zWgm0mTsPQ8LK6{*agko_EGT*#E%^v% zAinAHJ6JF}TCkwx;vd17l-Eb8c8oT9DJ*5olKv?I=DL14HdKB!x<33S<*ym-^4s@3 zVn=2m#<@I;EmT-8)OCe8KY9)HKV5{|sKGARogBNrIiN3E?D`cFpsbW-U&zSd$n(^` z=OdR$Ir5MqrktuJ?T~9N3n` zI_QnGd__ugxV(MRx)J9eLPL?EFW$T5JGp5`7e>lnm!d(Bau2oFC-EsKXnCFkMcEDG6^_0Z zzogRI!B;OqlilWInS0OM|2AinYAx7$d+#Y?FDv1KvcJgqFu*Peprw~P4|x)WD8VXf1!JEL2lh^FH*-{ofI^M~cBkNCl4o#QQq$1d zu^p?``dn@O{nq|-vmI4FE0>Mb4bm=Kek;&Q%<^?` z;f=faz@R)l?*OGLQWhl0<1m1Y`zb^hw!e{E5S`V7Yp^lknLi#w_O{+Wk2UI@$cU~`djC%akT z;p=jD#y1b(U^2f{-z9VKN8g0ntAeu#JbF3k4+}~@{$<9|tJLC~YPIX12^B4syn(TrTZcJ*Y*K}0ti)GD-v0gNNm{T;oDN`)CoPv3~gCIFnt)d-7smoaaL0` z556^gL~fa%zuQtF#v=FF%3&?*XK%0Df^z#Ti{3W*^KCoy*O~z`rfg<*uf^1d*VvvW zDuPKF8Khqd3lB(mhGPdDMRf{)$3QBnVc&C(iTy!SEs|^7(!^5BbmMuN z#8&&Cf7)k*P#9P26K{fJ8j<*nTzFICRwH8$1aEQtZ-TakS{APUwtdIW_`|JM*K?CM z{(|qv&***4`fC}PZ|FcW>RD!lisEcNzu1xwp=;C_PsIE@h*NjKGe?Lm&S)Lyaz%7%%TL z&h#O@DqXRg!=U?SiAMh`*Tc`@a}qE-8#w!XQ_tf`FbcSfsV=}{^%Mt9chs(LWR6qg zmU$R?=I?eZO(^lqt0IPGUTKx|Eio#roVSNDt5K%-9`zo*y1fEsm{c@!HeKK1dEga` zn$EBNaQcl*`<_1Aj~VM!ZlL8gkF4!LD8ld;UF^ZD>GAy{9_#@spv4~FQApHWQZkr! zBD$SnKHlVEFDEabU&rrGTtf?UhDNlnElrKHy*P;7xoLbYdpHx*fDm{d4=*qMNxZ^G zJLvZkm4p4H^R4EBX`ob^8#c}4#%JeFnR3X~Nk>rb%#Zpd2pA7(GU32s#Kq2Mst;oES}Xinr=d=kj%g}PXwkaZt{SC9;nQ>ur`^!Df_RNWt-?xX^C zgf+Fb>gwtt;o+zcs}=x=Y|e)R0nRf(j40z-RK@NwZ2f2ycAViDofLZ{S%RO}u6p>? 
zT`JHoKl>-rFX!TmNDMtQsMrqz`_gNIT%1#hzHc;?wB3q|tCi+{>oxe<5b9r6iEOW> zV7qfv(GEX1gFx3HG7a)ZlV~wVHgAyvN?~A0U zRRmk)+L)r(OFdb4j#8t~@8_^c-IMX>&4cT5r0sHxKl{PB~P@hmC9%`&({in+^>i^NgdE{eFLc%j%lJ@uUKK?GiNFm3Z>G~^vJ z;a8u%wGz+8%CI(7aDtO{p&Og^9<_mUE>O4x-6*^&F-qh>93)T$`&Hp2Z>HqMX=yv} zmeh$|mZ_~O%Ze)0vN6RU3EKBLlqDNwW$fa|-2{T$*9k()?L^uJ>N`9E*Y1aW z;TSRo_=(JW?to=ow_)cWjJ^Lp)CE2fBMl9aPU4YPBHL2cvs^VT zjLiz926l8TzHCZo8d;OQbNT(MDk|M`)#l}5-YabZE)()3YlCc+Wgc0ekP;k%;U%cb zJU#+lY;46xT$GqCfml_(CAy~J{j{QQx{((F|$f z^-^MK@~O(2mAX5|^#_^)=RC6f&+CxyE(y8!p5_UYGmoI~k|0wX;XpLAJ@LE87eWP^ zgM6R&FRa!X>e&_kc%m zy3#!VSfFB3m>G@2GKgH0=Iw-mA3pxP_EcF>=SD*v*`ks^ZqWY_lLkb9j4ur>EyLA- zH0uo`U)w^{%6;6*(Cm#}wdTdNTP&5eVEtp4zWyPHvn--t(JN$2Z&|EImL+Jz!XFnh^RON zUjeh9#p=T6bLI$#I|-l=_<%3MH*IA3S%)(QKV^o<^`h8Hv@L7oj7J^je{gOVXMe~&_3W2;z&>+uTZj>Y-2%c?RjY&-0*TFfqTaQ z;s)CDfD!53?Bl2_;z%6ISlwDH8bQGs zKdpUy_kiIQRERECD#SxD;gc8A2eD7`)w-l{@ALBV9s$FkJ`iH@Ns31B@hKpqY#m?U zv?WzLDDl9&giPS{dF<3Aek>ZyNqc)~vCxv)5S~G&(NDUS0zE_@ytC=G$k} zR)NObn??WU2pIIBiQ&K>S>D1;H*rEusM9<-_CVKjE<@Fs) zA8JrsxL`u?v7Gbg$^^!@F#3F#5Tq}9q9K?Nnr@>`B2^zvQiC-kE-v5URHC^BncT~G z*rK$ZHeX{B&&G!Bs4!VBX9!0)hR<(-SapM3*Fly`@FfVxEw;)Qv-=SpM8GcR9r)ST z0fyOt&H=dxc4*ut?T`G7U0irnh1XEs5%WYVLH82CHO5vsp2MhRZkg+x1or-jlQA4a zy?Y|Yar-R<zfFYGYz&cpc^JO}@Bem^ssD`Bum|UIRrbfwz@b)#=VdZu;nR`hZ zx(E;~Ubo7f17OWb*&`w%*({rw1cTccXL)X6X(>~~4&Z*ilbzaAEr zmb%p{d#B}%%dyReZR8FM4Lx9)qJfoyj&ftr`cu6n&{QZR_sL^euwQw`a4)HkaTn(vI(wj3k7uw1h9@5T4KNO698@NMZ7@r3W*wW zPKxD;oBqgzo(r9XM8IK995zjNpi=A4V?(c{2&W%^XE+6!i^aSS;vJWo*1%4x zvTQvk=G4h}n&ko86U7nP#JdS3Jgu`KFv5>DEqt z&^!4F;n|G%fA*Y@Y&i1Q2)L&q@!ENX8^pnO%2R!b8Qybc^sw!!?LAq6^w@5}h@sNK z`pHUL7;-x1_~6h`YtpjYZ@twU>XxMDFFV;@R5=0Xx@`MDfanvs_6Vs9qZGM-T`JLh z(DhXA5pa@O1u9Ag9b>41QpOrp_{I8!fxdobWg&g1QPp+A2vGp#$APg z!`6#=QTRj4{h;|!62x>$uSL_tqM^&SP|hWxCHIuq4aYw<-k1VY8QATgq4KYvnoG+v zN1nsoS*$~GjOdnb!trFmaU!G?`-gI`X3U(c#f1Y7{_?v|RbH?*1`M1DTlTJU-$gcF z39+yYU>WFrxFD*tG4|Q|_+aUD1bX@F^Y)eX1@q{&dH3Me=l~^`L$9+Dv}_JsCVY;X zMUZwu-peJo-zpnoKTv^HEeft*9aOE6ywvRFWG`U#EfSbpP3k52+<;f3Q45ixD*Dd= z=fC&$DfmNyEO)ba_(F5mNz%YMBjRDT58>bXts+gWfZ;eFu3m($P^xZQ<@fF9&?gBq zj$qBeN1RwVSwh*$o;%XjajZFhuI@_cuSji%;mPvTfMb|VyP~dOKE2v+tux|s$oMW4 zKsUOxUw|NYPp>8R!xf?fl>hmMg;qV{Q@b!-)mkEVT(i=5u-oJaYBZ8Z_Sf1(qc=Y% zNy`2R{ChP15_hdK=r0>+ct>iMYQg_!NxR!V!^$X<*FG-m8yToOYibXC0O!W*epUH~ zF`_JwV%^cWuhofCci#3rRwH(4{j8jv4N3R~oFzJsD{8Q|sH{6@Q@OfxI@-D<-#K0r z+z{TR;jOn9*0mt3uOXR-hz;u1yp*?aYE&H`XMD&fzdhuWbskkaBhaX;lO6q8=b6up zgtIM4$Oh3(%$ygi7Xl*QLA|0aJPuDKbDQyX(JVXoLbTGLpF`2sS^@J_bxuxb=i@pw z0(!JvY0quy_*lznR9;g)bG{M}owe{V{oOk{S~ZJiT6`k}CT6mso_Y)e6pK4Rp0SW? 
ztiA4|u1;+|vxOiA*t3ct;i&C(hYC7C6kKL<3fm9hwMDH?U6D@#G2o@1-ltX$xfgtZ zeYX6DWOeinz>~*_*Jp6I*E0u(-W%J6J7~OBM}~Ytzr7RXzA$`Y&$G%CsvrHr(q5g+ z=-yJ`&gy!yh)>&ex*(JJ1|jfn)cff)xpk(X;qQPic(=1q_~hnY6$riOxnW~Bf`$zHC)mnHk6RVdHUn}BsgNUus>Ip5tibEt zhqMrn>Oh=9K!C_P+k?2t>(J2&d+m*>ZUhn9iO}{e>AaGJrnlR*-mpOmBFhGgH8w=o z=@G#_mP*qY#l<7z70j}~n8j}k{qg;)?%6><^uns_WwRW~-DDuyozAuKsooJiZQG9( z+aD)VU@m)I`m;BiQpsme_8PX*7N$^wHxcM-=c3W}Tu3$1zFH};s> zhN}-YKxJ0)u*?=D<#1qhCHcU4x^ZHZVq;;EY4$`oKgeUxL1>U!*w0pri1b?<8#=Pj zY}ue{7CY9Deu!h2U6C;lp~p5nVrxSyl)&9|IFfu7y-M3$Oc7y8_2f;Z^pC7c{N@An zJpS3bHRSN}zi1g}qE$X227M92@0kbXc+Bv2OOp{@lxW(TXyP)GR4`WM_jSkU^c9y+ z*3hi(IJhGHhG^+Ie92kJf4ux!6w|s4b_N&NrxkHx2QZZH->qDjx>cQ{qUa>@54`^- zesft45yUpJj%|CK{EMIg%jLWBYv|b!)}XmS3u($~hW&bJS}fSiw1dL>f6wm#-e@Yg z0V{>$l9KrYQNp>9op22LX^j2Sq+g*&RSu4jTW#P1q2S@vDIDko>+)19M!oQrqsXhm z_E1ge%FotlnC>hm5HO*^VJ_=Wtbsb$JIl9g8d9CoLYF_7mhBm$p9Xs&8pvLOQSGQ~ z98+u(&TddnM8(EBaEbwrOoW*8?7L`D(xqCoJ68j*Q%GiTB8v;@G6ULuGlW!6AnshU z0P`0=)+-xC>l(u_yP9v-BmTp62^u-)iH_6UlSmI~l_FDSWwHMF#lSeb2cR2SG_lUH zKsFjBypx@f4oCeU7$Fq9&x$MFk&`dhhSn(x6@gP$F>>#e-elp(_4>WR0K5(md|N#c zdcfiHYoU=KnN(m4Z)}{ejMp*XN+|+L0`+88QDMZNWr(BKlmjUZ%2m2 zZ;wsyh1^7iM&bVre`R+W9TVRG*4Z`Q==M@B~GM3{39 zM#U;n5q^5IA-j*&R9*inUIDsEO5Y=|H#^J7(5dpfRTWh=07rN`a219gOhAP`Pi^NL zCeAN+B%4o3Nr~;1WFAtTB!ajAXbC|sP$Z~s6?lbFZLE=&8xvcFL08ktF* z>wkIkD|u@D#cF>u&|E=LTzZ;y`)0KYIGPDmYXuAc=2&%~WlEonKAV%J2qxU#nU`NY zKUim9(x+Ms<7Q1)u9-1TiK=)duXi8d(cINcnLi=hjS&E=z? zwJVL56|w$3$rOr8u&#Q(Hmpm(GxO2fD92*|2>LMlF~Wbak}X`Mg5` zegv2!WBczUs9pPMV8HAWpXlp+>BA@SWMqzsH?^g-WWDw!rpl|P^g31oKT9LHm3=o^B?~8b4cZ&YD|*hBBUuCg;~sASOeAM! z?zYZ;zprRM=={Eu^KU2tZ<(A&y{GHLBUDqXXGMr>^RCk(6E%d;ij`pv`T=a@wc@Lo zeZYiyl*0JBhSnFPLx3BK3^<7Q`DJBKG&MD0`!z#lRgxh8PKH9`6sZFUfw2FXB!Kxr z)RM~MCb(J~h<;j@{JOJq4F?;gwWrj=2BZm$G!}B6rlvH7Z*miX%t1|wBYWB;~Gux~C)(Vv=@W_I;wOCZ4P zd>{TGb_&8RO{3^?^u8!N{IRyTu#K_JtK^aTpMxLS?9VXCc*5}t96N^}!8hsXNiG5E zT1sS$cyn`(m;lL}STWyr)0Z!(rlzJgOHBffO}mUw$auS(K4`cHU2UbGi%=z@&l>ra z2OI#TE*f-5{Bn+3bNmiO%Jk|7m$z@UwY2oKPXVFAPc<4P=AqXp*)hV@dDo>D_n#_1 zhhSaW3{1#2ym4Np$)W=+A|byVXqzlQnL)$f3!SW*V_J8+>_j(T1>eKDUapnE$e81z z)s#}j7o39#iDobE!>2c>vwGnHW`8yZ`iF*I`un#?FoolF(lE;jqC56#Y242AtVK~L znkR}pZd`PYBl_u%ZcT9x2sBn`e*mF45mYvmxdG>7ftPFOHxyXM052PjJ+ZI&UZ_RJ z+v}tucf^%nzy2oZIz_HQ*aUN*z<@u2LgX#2K1G$Y-IuM=S*Bk|(|`dL1DQBYB|_$+ z4x{6BYL6Zr5Uw6Nm)w{tQfc3rU|Q=c#gR7~#_=EYA~?q!B_N#rQe2Etu2(M$ifB3T zexc$K9;~}xwEY=m1K8)ApFUY`qaytS1Jzm$Q>tPkO`#XRCc40=^uec#W5IG41B_&_ zOBO4H!kx|wIK`QyfhfqK{d(5U)R;6_a z_BzGDz<}15srivU0MmErw0Ku1ENv&+kL5_L@unm^L>Re#jTR!Jj~|)8@!J7??n z*-GPt=4baKz=+y@X3m0wa3iE*{ohN`D613yp?j<5Fe8a67VR15{Joq(-vdO-0$U8S zr>0Afn^Dj>mYvP)-<8A&8iu|uE{;fU2k+f(~MDWk&sDVZT*aXfHBcn6A$U+*UAljbjn~w_|YDo9o%(?s(dm&9L^kXlNejo zpRHHZMOT2QwUx~9aJv})xm}Z@x^_m%Auon50^$%WbupvPi0_Nento~kBpL%(nM?oA z8ls9>`fM{x%jh7BPp`d-|C6(VL1sCc5IA9C{yVf{6q?0H58orF0VbFKJ;sLL`2-iR*V?&( z9T>mh7sv!FV`v`qQ^ug_Zj*ahaDaej9H$~*kq7^>GD@b5%wDKwsVP;XdHDa=q5oMX zZVjU+o8{R*C1>O=z zvq1E5dKA2+csGRLA z8=<2{wnTAdIsGcSEUFpi6I{eP*Z0INAWKXaCCiOvXt%!`}t_=m4$#CsW9@ln68yWCNY3AN!Z9Oi|kIuI*c{ zf0uf1ebjETZd8B!_5n@Z09ZrsLpHrNln&+bT|oT+U_)S=yCRvMG&C}D1^~F1X7dK4 zb|!Yi8#S*56@ zKj;hLOn;P~F?%73k9M%FV4?k4_jGirCx%V@2~k6+_IoVEAg{?4{p!#{@OP^NgPYP! zZEDZnreUij=>>RP+Z)<%q17rL3#dh1!b>D%+XO&$u0Y&&I-G_ zX#8!fOQEjw-99YthFQoQKIjxOf}P!mL4)(eKZnD`mbo}l=G%8T`(#Y>GTGYNT5%pj z^Q2P_79-3Q)){ZB@@so6&9ePAiE`HEq5T}HWPwY{{b<(Ei*i)_L?s*zGEt#h+p7oT z0=)sY!phcjz0J){oj3us%XH(wnB2f?%!_3|t_Sds!sDm9R&=EyTL;~9QcmwbGa@c? 
zA>(J|+*5upqa}h7Y<@&)pAIF~6lEgaz-!{N+CH+#PFDl1mzQiUPH?ostTbHn{7#WP zvPjc`(c0!ZV(=WJ=WJ%nl?-IT{4xwRQAYodL>6zRLNmL0|BcJm9 zE)KL(x%BEV(9qB>0p!JP_mkPL&F5sBR1iXng^it@`Mq`ZTUnW$i%WHDF8V!(Tq~v* z3eZ5fkx|dy+c4!3$1)hMC=QUI3PZ=X2GM1k2rjxQTy|;4Cjd;v04KlVEHZq zK#;*}(k*G#v~! zo%0mgHrpzENi(1LMt1C$$v`_AL@e4ba3)%4+Qx#@4@zLpReB=)_jD@xa053g* zU-2_$K_#y?Q$9&n2L?zl7USJuZC}5#c`C<1+s9VpvyT-EN$M+$d*1&6+^2mDC{P3f zLJP!bJDup9ym ziII0lllNgfF^8T`sld_&#yv08TS*CPeSN)*2#Pu3&us*~WS_q~@&!sX_|()+xOe)| zOQ8;|oJ6F)5xRISJM7PFmJ(wxBwy66T4}1NsJzZyrTIkaxcLP3NcfO)byz#A$L3w= zh3dpy@tf$pV3&b1?(qT=@oCe3j`D`%IY7<;J-=O+IcdypSXyjiV#O}5f#0-c{~QNx zK?n{VD1?RFcG265^+!6(>wRhiYQ#Yar(m9&U>09n*lcL1=7lzQ_2sczb`Xl&gw^d!oD1m~mCgpRN=Psuz ztisi-CQMveXh+pit?yIyC?Xv7fw@EQPCtUrYPkKufgQkZqk1KkEq|26|Mb(%d~5-m z7j}2O=Mc&VylPoML*5rI{Py4r?=GDB?g(8Rq?xd_Nb-IAiKp4LM*kkPn>u$1__-q? zUlRNIXvG$y_ftt@A%}m(I=-chWI#T40_sV?YaI_0tt!TFt$l#l=9fYAKj1X^@tF&k z&uwjOod+_?Y#=-l^YHjhNgE_%K*tS`TpG)4i8%;t4t3n#=uPPQ@=wbC9+pd9R~tWI z+wSocIAhV6l}}br_bLhn-EwvZ(gl+MJ@DAE1LY9IHFR**+R;Pf^nkeq3%RnXOCR)&#+&| z@`|?9Ooqk&H))H_`%f;>S!Koxd3-<|AYFMlBCq;=s@wNnL&1|I_Xzm@`YAHu1C0LP zERZpy406>dy%08{G?GvdYsWrTE^7xNeMYzEG;r?Cs;0io{g5il{-pkIrbcVd`Mwdy zMrv_+n60$8CQGR<7Q2?A#S@djQ$HCY^W8Xy{2^U={NK;#mps!E>0z<|_>*c_RayIZ zxWKONb@6=-$6&Nenly*Cis#WL3~XhT|E8`cA;kM~A-^YYbXp??$Z$RM@D75q*uhtcQ_M0r}byW@o)H6P1rQX(bHD2i2-lUy|j*1rAsHOcCp4-i_X4df~7Pqi%F#yi&VSv=xr17 zx1|=Q*RzIuGEw2&^a_YbvIXfvQYfVBs=Lou1CeLnB&?j79>Z6;z_DEf; z_&?s%bbyf_10sD#uL5214|oQfvCKH>~DUTUE6!}h~_s^`Ag{bCr|fu-InD& z3`oP*m_mp0Y)1QJybjcI*pv$O$O?u#)l<3d`6+jzdj|~$-=d~{#8L>hcsf3SE9wZ* z^ZP6K*MrX9@vSU9_P>8$n=igbyxM$^(!hBk32;F`eY=|}sj>2B*p&)n(ht64*P_CV}xU<|kMRdNe)N~kh87Z|^J_u_ll1b5L%ELQGw`lL{=lnu5t{j*6P1d}bhULLmKIG7$n7v=i1v`s5 zXQr-;z4g@Jh;ON~uh_rw4qkQvKy%CAyCNpsNd>L~yRcqmk zb(h|E{5HR}IpXx)-xblX2#Lvn-Bd}%`7!~$924@07|2Q15|KZM{#gfI9z5Dp@Gj71 zM8~2h20V(6_wfIVA+Ij0oOe?_>Ok{5Y$$p^Er4=XBiAzCft8U=cwgZ}Xh4K1e6EkQ zdPsC`Me0czKeAuoJjt%(NC8w1^tKde)R3kUG+k|B4m#)XJZrIRK!Xy}+9!#vPg z$Mx)`zrRY{8p3IN5*_UVN)$Pa8V5NNSyc34R=)!==^eU|(`D%~f{rZr!_*^_0VaNCmy8#B#E4#_Xj^|mCkLnsGogNzYG7sT1M*36_ zQU~y3r&QU6U96z9gZ6I^%w>byc9LFBQPFPC~n3qS5p{q0w6!GgX; zr*3;n8hXE10A_sq_H7l&BfI;cB6e>~(_9kOJ=@}37W+YCqZ?Gp(I(-cpAK4%cSl3b zsj(o{xgBW!?tEYYUzuQ6a~-E5f$>58WR{`lx zX#ojoL`q6YrMtVOyM~Z%5K+33l+K~MTO@`K0qIgY&YJtV-}k)dd^m>>d|-ae%-(zL zwXf@6SM9!`tdU?Yl!BVn*heW9W1B3XwYje9&Un2hc!2YlbU_}!gN-ADC|z{x)?5#l z41z9dRaW-bW;hJdlPM<4Q&u?=I!Y&ss3p`HzT5mU!vD!u%fZi1d(p-4J=u>zV~;)y z<}zTzhQGK``P7yoi0d|THnoz!mohqP>_J|%2$>aPz!Xp}i`UfxB09I0Z=j)m_1c+) z55Own^Z)fr3uvHe?%oMxwknfpGj%w^s0SmWQr@k>^Io=PuFoYNS6b|@l11uvV z%r!;m?Y5;oLyIzMDK3(m>X+_z^vDHa8^7%F!eQO~`)`PCi+g@en?s9Fi6|EHi5$D~ z6i`(VCyWjO5>!P^8xK|x5RF7}`Q3W<^f0C6&$X{fsYvvU$>#mq+FD-?!s!FPC?=3v z1_7WN9ud)It2*D+)s==Bhe3VVW=-B4dnx}7MBNF;V$#p>*%UN--HTr z&5b4BjJ_T{U31A7&a%wP8kDg;b4)!_)we$l^h{T`3v8;gqUu`Pn|@f$JOBFPsboMJ zYEd;A9_5}^(Rb74Bj-iab;g(d62jwLZ}yC(9qD^yUS-j9H2QF<1|%GBdcr(aHLA}g z7e_lYOHY9@rFjMu93fLX)7-3ZY(~#BG@(Go3ia}iDDI30{zfJ z9Q)~Wn2WiE4z+%%LzULiYD862`mIiLt#6?@WC=6sd0np}T=P7-VXPx}-8#X>(8@N9<*NgIs5h#%un{!nd#0{s_;u42~@bZinlWQu@$BfXDycyLZQ#L?#bC z2kx?T;Y(t)3!K0zOyRNU#Kg|d3qpI94hA&T)r|qN_)b=Kq234;;ZoRcxrG#rkw-1r zqg_;2l#^S`|Fk$efxqS4n5tKMzd`-b0cZf53Ug`OMJS$lC}#2B1>`e_Wn2Js&(pp5 z@W5i%D^I|V?N0w(9IJj%K8-gARFsfu2A~Ym(DVH}?T%MRH+PPLO#z*{Af4|EAyJ5y zlCc$+Hc?;jdIngJKMRzGrmAZJ7@CPI;gZvot(NDM(_8JgEkY7fQYpL&%1>XLsuB+2 zD(kPcPV*Bce#x02h=uk%Pv5wvnr-UOYsp36^H+~V8;;&xc?_n1ksdbfm1m6QX=`h{ zXC8aH-@g7E;w!+-O|Y}GbF+D?V)#dxA8F&l5ugU^gN#|VY1LvsQ6g-`&CTezyqBv$|1SM-f7fc6mWf_d zQ=pFQR+gr$Gh|z|dcQZ3gQx*2aZS6=VkVkt&gskoijFlVhW=sn^z=`; 
zEimEYr5mqaUZ8++0w&@OppfYOP?!LoiVv`1hLMU)%wDn@SS(kh~oz9&Ho7XHW zJ3eSxwf*)qYU#PC(wdN<)x@*g^92W#+lz??2g8v}VL_`ll=6l1h8XKf=sPpWn%STI zn(=wb1>ky{1w}2LRCEWR<$@H<8a_-R@6D89ypA7T>Vpf*5RoyDGwr;Ys;NC!AJ^J1 zkpWD@Dj4Br>%jR6tB~MbFtfHUsVh1d z=gJ|3Ma7XmjcdJcXAP}LJJn!Mc|AEAHS^r+i zOjV5wXCQ!;{Yv=HK5XjV0_FMw031o#+`mw(1g&`S{{cY3FEW60a?~CIlO>|>^&d25 zSsZ;;h1WaZva3pOR{y5E&1k=E;8wukViSqh$7Hb5-~|NYI}iB@wC`i@?_okFE>^4eV(xrxNpAR@NiKryzuvwMERH6r z2t%v+%}yYPfoec9ECkSW*5Bn(?BAmCHl9(+O>8D$WGKb#pv|nbsQ2=oaBEU&A|um&qcf8z#V(!nc89iA^kP## z=cx1TO3)njzcoDkxkH9(7x7QW1Ml~+{zcZ|K{$3IvuNes3G^?ra9m`PG4e}8VYkKWD95wwpY&bJO}yX}(Ykh1*|EChdE>=iQbHr=+EutG)ASLt32Fs8f!pHJlCH(SUi^PyS&j?RXU79cvW}fmQ~Q0MIl%yx z-d6=-l-!~{lHABZ!W#B|aLogy`e;^05|51hB4{kttXY-#dn(aS_E(e zWKn!oyk!^PaK)ZFN3!fQcv|=sUtlV!C`X*s@R{uXVD&6{Wd+C0#b|Z+KQFF3%cWC$qY3#XmKp*8Hw|i9JnXpit*d>uD@>hX3`vQ405bywg?w0 zD^~mUOgkB}W%dBZUH71Y zCis}~$3Y;A&rKE056akgn*3rWKbgj!wB3k4Gpz?|6tQ3i_?ZPI;%cy88Ll~lwf}nF zBHZomvb-rw7i`7!`dd$~$9IXNGCPW8HZ1)qg4#y<;37$HF+yzX467$~qP`2Sx<9(V zckdSZJ*a~OMu%vfz;!PFzO$p{EmVZfX=8F}Qh)~=&jNvZk%kl})BRHhv~GqZckO*_ zOwVY)d%u_X0v*jz3`o)*d-@aEpKO}6EL9RhO&^SlO~J@cIf=G`y%+v%4z-y}q{;Gi zxo8QwcfA~DOiZKv*~g)m&m8jp^O45WLpmW~FEUw#IwimQfDcp$o@E%kpMRXnke~HB zKO1zy15rC@F&R!(3;%b{a&KF~5axKV`B$wZRa?HAANaX3m$9FSn$eh;nBD-2xXSO& zyQjC;YaAU<&rwoS<*tdU;*A@T5(E4d z{r~;S(|v2Y+B|{)&IS_zgwVwNhtmnR(iu7(eF%B1jP_9$5J7nJ|87v>1^KVWugJez zX%rH|oJj!k&HgVell$RCKK%kN>5&^r68NV7Q&7sl+c>KT&O|06$0xSQhW#-~nJUe**maRO#Nk z#n7!(n@XV1JrZ^U%JiUqDhMWAk6P)xDRTsNH}EcaUn<#oCEb&?Q=4chWqEBZ0r!Q9 z|A!X%q(}duH-MHXnw$?C=qUIv7G2_#wnxQC-Pgte7!ZUXcXwSOFQ7| zjL8-+m;d@!wG+HEzdSGNkLP7#*zTVDe94M_#f!6|r z6~wk4C@L%CQThH+hCEancLQoEtU@-5zie6P(t!rs4Cs<^Xs}&hDWWQ;NgWW(e5Pco z{>Qq(@RqGwAe;m&R%T}O$w`Zjzn{N+F)M@JdkRstfQkzEr90`9^wx!S`3MiQ{O<*p zJvXo*(bS^Lkhr3kCf~oc;KL+02VSsCf9TDOD2p6ohNwXtqE;CM1<4KluFU1LJORVq zdJ$fbU0p)fTR?3=^abXpwN<=?15+Fpwy3h9VkAQeeJ-CV)L5VhcdEdZ8wMq}eb{3g z6oSx*d0avg64RkCFMt*ufVB2nIr_@$L@?vA6`6ll1O89iGz}Z{{m4jk$mQxAYI5?u z*3C~}zlH#|Vc`A-obVo9ht`k~43c#~VAURO-A}BKdfF|In*y!KG6dU}AFpCwB{->| zyoZi}WP{p}y>nQ!T+G1fZmDi4n#nLlJ=~+9qPMxdtb0A$2cm$)2Vx7f>a5k}v*03t z3#zJ$6EzLvu_nD+%OPhW+v~Nw0_rwyyNrV{y*DIZG*$~Yb&vZhzH>PlAlDNnrfk2n z<$DE?SgjfhdGHpsR`KvC3xZfmjM4O)#f#ySi5^J{Y{TdaKixPvI%p$TLxUq_JrI5C zoJokIc}5YzCgJzgK1p!O&%$=r&kCYkTD@iI zNF%yR#feBhlOEI&AQ*1OVw?0l-ZP9>|5(V#*3B37(njdL%0?ciy@Yi7&vu^WtVV3Z z(UoZVtNeH2?i&;A#n7LNv#sU`di$=qZ@~~+xm*~a%HTn3AfoF$9k;V5G~MKO<2!va zzkkAC*EOq8CVjCi((Zjvy62Cau~&%pG4xEYX0-pjvV~RCgHs0gxo)}UceVXgxA}L7snelb&41B^nP7BN>9TeMVEv z8R_ZKKrut?e}9&B(CKUlXqFn27Pq~bWla}UZVcsy1v}GLuj3U2tt5SbTqopK%lKU0 zc-yco5T$H!5zc{GP?7uV&bDmK{eRR2E(e7Cy)2AJP1KR70{}-icfr}SZaL*&z`CTu zqGtMiM^}_}A#hyo;nr~rnH8q@5oA%8j=!k#Jr7FRQ{J6*=v{_;M9?C4;rWYKNe>L^ zP68f^2DPL%^@K3_mR4XP*mUMG$U4@pju>unue+l7X9H$>%TX=p`rD|NTR$Q8^)3@R z;-B`1JDiNyZq=KuV11C-^lq(E6A-AnOpK-{JZ7766(nh9sQ9TWYjl$x^12^&E932B zVHY>vL{hGUqr^V{as8|?*q-y6@@)~WVE&IkNo*mT0~}=sw1b%mx_Yrh+F;b~1wUh} zQgj6`hE=3JO;5o4#zK6Smr=I-@1K!+#>x?32Z&}`7j2&r+Lf&UL(N4dUUROtE0W2s<4&w= zh;CAiJ{}bn@>%-F{IQlg(8FTAisZYsT5l0Rtz4t0 z7#Y(60xQH_YC2X(^?u>luTTi<{EWrOhf>Y4qk&+gPPRA%8h4p*f)vt!-gQs5>T10` zfDaG}^-y?Qp@H@;wrq*y3R=G7^e$R?E($2nC-K(!t1Vw+AO57^TCNJpeLWh!*WH|- zYigaIQ5ZyKm|;r`q!IuJ+r{HidS~e8$$0Ce#+W`)L8S1ipC_+*Jv6U*mc>}6BxPir zllHD?dFP#Cbaj`J*@ap<&o|1~g}u5_^|&V6wF;4)L}4C%VAM&^Tw9rKb&XBjejr>J zdl{s&mb*qdQ6bO$E%|;P#+p7(_G89A_KJ80@De!azh7~0-+wS8Nf#($lX6q+R&Lc< zJ?(zI_T_O|3@Xm)NzM;b33o3$SzgI{Rv3J!=}bH%J8EMk%J{{|{Uk>`jJes9pwl>|YKERj-7zH1 z&x&}U*7;26jG@h|_d;!u!b?hVJsZQi``f`rF*Q z=Ms%;?mV{?Bm3(wUcA6c&~IsZ>S@nE@gS7ljcPQkV${jlVa)ePKkoD&uoUG5wK9fI 
zTH}U%l3G*bQEUf;kB=Z-i6ZGlpUx@>Ig{XkYzHZUJ#<{;d8YNdhP(lHnKTljMIS zJ7TXW38w-g#+hohXQJTZ@t*%K9m7Ux6+BMfZSy{H_Y5ZZuCKn6P5`bb=vi!~Lx5bi zcXNP8nrNv3DBB7uYY}v#hLqF_N}+&x#ObGseH)Tb^9orzif5@erjnu7>}&Tzt?Su2oQv^uB+%#%pL1MQN`XeH$S_{YMRO0-uA|?6>RN>6>*-i~dsu z^ZZIuTfHh~IldStA+1vS=tX|&*ezIdvW~-|q{99^9ak`rT#fK2$%Dsh29BYwrhWYA ztX8}R-&Z748W!EE^l`ujxuI;{mkc&Jr`D7j#*yKUzC{(EMk9iIli+iX;f4-X(qB%a zwEzIQ$sRb@uBL~V#-L~|GWOEITh6Q>gV>OBJk+9VKVhw)2`-AdRlza1^Wv!|<|=n2 zkK!XT+Df(kG7O}$RuYlJEL?tLQl*P3zl1kB7FO5>#okMPin8r9Yi`l7RoP&-pP~9^ z8-Iom%MI;%shDAJzW8VyBY%#a=A4VDAmLY{T5?-;YwznqD1teac|VcZD=La0{xFo- zx0o)V+eY%AL;oi|UptfeBn6i?7%UGSaC^k#9yBBX>pCq(gj@OwJ<#SUC#I~`P-=40 zoU796uf2gn^<96EqxrIss?qt_3tmlVJ@My;=x(Z97(@A{w-pE zr+7S5UdrJ`{;}c+S)>W!sXrFyHf7D~ipimpZOPn2I5)5E{aR&t zad<2zMxU?b2P)13$<+-O`G*_2<2&(=t&nPDv?^)>@}0iHJDrWpJe~FOUk;9{_o9xf zYXq0N>k{e9RTNc_O2sxsG(uQ#;y!cSKP?TQ0*h!~FyL!aEW;a8Tm{kzGD|G`D9bVG za187ZaaP;`v*bGC%dL-+??jD264wuofWeDL!)?3-#fxLT_b7aMq3+hPjAMs!d$IeG zbl%@U%z=CN9V2zHpIrhtaY#C4{yqqoYj{&DZCrN_FOL^ww-yuL9O|4svU5hO67Ax$ zFv1{F7`@_XD)YfQ+4FzXb^YRZG)@{6xT3t&xHX{MabKLg{F;EMrS zW87+zmCO>`KCFM7>?0Vg9>(RXlaqqC(oByxbBkb`zWJP$#`SOR-tPDV(&>!gcJSPI zHp;*%_Ndba*V4(lq_`u<4F?v78ef}Ub}o#aD{6rmTWTWuj;@oAEidP0LKD%xjAnE^ za?59>tSgX$dHa?Swp%2vs~}S%Y>i#8{>{h({%IxO0>s2!$(FkkAHi5ggAIST0yph@=*}y`#oLAC%ID*^SYDUzCl)@IvAv{MOrzr^ z5E^+KCG7xh7a+Cl@sl01&aTCdsvC_li8Wd_14R2SKFnhOGrVFz0t*@Pq$Z_J*q5!` z)0crAVi#IA+4wRyE$=7d2Pa(Iui(>y=TwJcIx<1gxZ}aid8qD^+;*|;VPQO$$f+jT z<3+tGZ6<`9VSAk)>O*rbovhySyc~-;FV$=OaW2EZ zb{!wmJ5(xmdvim?<5oMn{blv$$05_s%lh;3SBzIQ8zT}c^ANze6q@}?Tn#{r`V_Be z?J)JG)zyVYL{Bg9_Rw->c_cFYiFXFlw~KPfnJh>yAOy|A>@A_KmDBT*yKB*hgB?Ho zl6eX}hypfm>A*S5nxr*;)(gPm;(5O_i=tdFE|CJGKl}`;h*sFgdYZsfaQDuhGxv4j zoLsEmt%HJyK}0h}W_$wziXj?iJ2C$*RrW&F*jO#DiQw=1*2(Z~>LiEG+ON7*0_}wA zf{sT9t#wbqjI3o{a8;N{zBlU`T`w#tw3B;3aqO|{$eDfo{$PX-paOH$Yo45b#_Vm8 z!f0M$%b`EsYG%8x^8cbd)wJDrCLYR7g;0}zQ%g)C`tXrHEe$~~oB>|Fz^|2?*lZ1b z+Nsm-U|C!@>A;x~YXj)BG;zT)1v8t4xfI;f^TLsaY4r*UP&fSVPSyLaOuUsmK zuf&j4?0Wtj*|?l+H(0JYP>=T?P>*)YgZS$8XoMp=YXX~VeSP5wa#QniEu&*eg?7Pp zY`WSCq#qEw_K!qj`+QLSwcqK9AF+s>p!QCpdUf|&dNod%_SyotQy3~rMEk?SI@O4L zYj$$HY;Yq-_=GrkWTEMgXWm+n7O;+j*@%)ek;1-iyvwQe$Y-O3b|steM?h>z}^5^wWRRrmGv_ zkhE=j`k8Hpg{`RgdvKDX8C;1B*5c_rPR@Nk>Q+@@V{qdDd=f3&yvSvf%LtN)r7xAx z*Rd-Q7UTK*Pun?1**sS~d-To}g1=xDpGh79gB*?6)B4L)zq&$78pGz`;naURzwr=RaTZdG!U|Z|5OsY4LCkr{6iUTzXPK2>F?kPSp(o zp=51VTX`p0-W}$TwKZe@S{vr&Hj}3edhjGgzvooK-$j2AT61q!04C}oGCAjNI4&3*!=-SxfU~w)7i6!(@o+pf;pZkiV7YYupbam z$6|~DL}*vK$XDLsg+vUi1^Oowh7t)dPQ#W@{r>8Dg$=8 zz<6edpC7rj*m;?%D@v`6{k87dK{xw#{d)U z@2leBma1&W$fGaBmaTfXg?9Hhz@|3w-Q2`64yQCsKdG7D1I22T7*tf^v=Gq%c4KZ3 znl_!KnFn2j{oO1-FRg35JifphV0Dza`$@t+H!cFD#6C)le5VN|`r)g?xc*sO(Bw_C z8QWIxV{r>V(&daQ?3!7-VM!&Up1=MDr5!C;Ugzh_pYf?fKx+^gaFBqzgDou=j6v7* zZS^{@i3W!H0c@zGrBa+OZrI4Rb{3L6g;ufji1YMoKLhM0Xn4j@51(Z;_= zIX=$B-{yk%+Hhp9)tiS3vd;y_(*QGy0<6iKAOrco^l`)N&6vRdzyn8^02%FlwAB9M zf`5j$sS^36hi^Gson%tgb=TlkQ6?KaLKS-PYkBK;P_x2*48cupp}w)XIf8$fX+FhFu%0*_S; z5a=2MR0Y6twI+h1qANGyc7%k4*)H2-{5#(gI04hw0#`}{r<1Zqz3+PV9hOcaK_1<1 zEoZ-EQ)_;`yF=n;S34w10^X^O34qH5y1*0V!__4$l3Y9^Q6p;f;44&+{zPVbLT0lZ z#5HY+^&yHIxlVFCF~Tiz0Jz5e`H_01npWLIw2xAclFZi90A*2Y#qFnGN95H@)G6$s z={@Yqg}guwCi9v)0F`|;{2-$L1tw8K{sL=$95B1|>C}tf?1PM=dM?m--#`--MM#qj zykAH~KMeq;rJP#V3%z{tcvMY(h2OL2gH8+m;&Hq2p<^FT>eg#_F|xX6x7Ox|-_NV6 zo0kQGF@5nVhSm-kc>9${>5J`k7c;#0xr?CBjY0+?Mb-y<4p^7fbpved4dz`lmL$41 zEueJYG3hI&V#Bxn(2fz-(9hO?-tkx3quU&GsDL@Z+r(bq%q$DBlW^7Nf{Db#{R6Z; zsMhiF7hAv*)V=AP1MFhw4z8}Rq+-ZIK!%Aobsg67__pnQ?O^0yqsw-#VwT9cz&$_| zb#vOx2Js=KdW}CIidIui+L%XAIj~N1Mxh_6)y+mEcri?k9Ze2r4#$<5PO^ 
ziE8cNUxrIcORCL%#{55!#d`jMM05F`B;aoj?HkgGj41 zEP*TI9b&zQG1kSug9xmP$quuUSd;z2R5-l0b4uS8_Z6PbV^`5OW6Sb7a5FTwyn}CH z19%q51KE^~{Mqe52Mo_GKKYfPNGmM7l-a&hkoE}x;P4oL<@2zU`6NAOG@vP;1nw>J zyrl|5(cmzL1x0x3ZKmJ#hf=Mou4Q)Mwl*_+&LE#!oAC^Xr=+ALUR5u)MI}yNV@8Pg zN1qa3a9vDQVf0=3EBVlh(TtPp7W55=IiPeIcHG-T~~YepP`dc zHQT|c#L&;WsoEdgLAU5K>mWNYfLmP?R$G^7$Dp-H{ix42+#GSGC|m4R5Kln`l-UF< z)w&Jtdb}w=qm%9yUB+Px_evCClI3)tefx12MtDR-Cp^Fdp5jKwBrz&hpPOre;tmgI z0$C{M81YM5oTdT4r?YM~o31kC zsqbg65GM=WxQ|HR@{2lV9{)w#y8XS}+AL9zDg%9dKhN>~WZz>rURSUA7ZFR)qKgVO z#Uhti1urt<4N?rLUXr<`$;{kvw6XYfdux7qtk&&s1oFvNPBVW9#K_&Ljkj!;NYBzL<5@5&Ti~*E zgKD9|%;$4}IBF-&kQn+*_6^2R3eKQgSx{N}@LU18ROzD?s8gQXNgBTs?R@PAI=t4T zFKV90_tAf6o{Vaihhsbdq}KR$KU$xGGb++C$Idyr>~0T-n{moqXs8TxXuX4ht^w)j zt{&Tqet}g9Afuiic4f>+4t(8$k1HK1`cg04g3Y6Ue~#oW@Q2SP^KSoRH--7Z4I0et zWqPw!g+&)mj@T5kki38JonLFOu0Pz?t`EMwf5L@n>N!0DDb{wfF0JSDHX2$}c5x=x zz|>Gy<*e#@R>k_)ZIowr?x}sB!wrZVT?%X7kV{r0X)j$2tUk?%+Mh$^V;A~H7wT?r zplTAGFBQ9+5<7)br=CLV)KSA;*rSt%kTWsH0&6?JJM5}+8`p?{?j#@94c$)0{Q3E~ z%7x{lyzjSWSZMJAUp4b$3jhQE4=BgOl;40*(9}6)Xf;~`CVU1*F0rA48rR( z!=Zew#s#|xe*#dR^>)9-F`Ab@h(ZS&qQ#njg+@GPFFSrl*FXq{EZB`gEZ!>uRHOMZ zLFqvSCqpx^m_!Z|zkQ!lh+c79i!RGYGKaPQS0G=?HcZ#9iM8g@HkMu7GFG}r02d6P zh`cys2EAsM>_XMg=VM-LKAp~s)Fmu-i} z1ZK|cG6-X$a1OMNFb4*!;-AimkXmBeM?K!EK&r>ZE=w?Re3$e^2Sm0@-F5zH{u3O! za2XZrn5U{E1Rk_xdEVdh%xj(kTKF7M>tLhDuk4emS^)Vtws7{ zw}7b`y&nxmGvgdqmXdj#yf&$R(ZZ74;=XPC0c-@e@cy*R_FK)&)_UU$7C?X5;F4_i z=`)^pZCGCPi;n${qd7=WU(pE{4h~HTRKa0jTa5NsFbJ(6vF$_i)m>Ygx>+lBL0)a5 zYtAwM%v%Mp#-J_sSxn#H=3wtx z0tUG^+S)0j*lqAfv`!T5CDZq!E=w?719aoZM@NYpD=QTp;KcQQlS~PZbIsGXMe4f4 zXF{WKkIQ`-SEhe=oD_AE1-|?~;N*UN%fGa9VdmOcD(IDyMJDP`YACS2o4(&IXy*U7oy?t-JP&+=;W+=Ha1HME1Ei$6 zjRsIvJ{=dnppcRE&^Oh7g(auL>vt|SVvuCIXa6JLZ90**g~zUp=y98hfmrxb*qtSq zB6jIH5djSijG-pJrh9@;G_Mj# z^g=R{*kd;M5gN@WT#qGN-n7xC=9|OY3m*rD6ggfiGlZ27VM~<2zjJzcvE;I;S5=iB zY4NkcU7LDxV&bwcTXrU(tT(3U1I)kNFK&NYPm#H_XW_+U|4GO`=~0;Zo&b!DjI5st zTX1l>-i{oe5y*4eV|IXcdU2tdNB!py=QiSlB=iNOsqy?f+2P^3$>Iazu9g7rFAi$E z?)?Y$Zc@dvo_)r|jkANz5<-3Pjah7t8uG=P$ydjxj_P2&UFjR0@79!=^Pi=BO{U}J zRkc0vsBlm+J^iy=l5?{6n4mNBko{(~vmsy)ZMr{$n3Y6xPhZrAYPuJ)>9qU`{KV2w zoU13fNY0k&hGkOasTF_!ja6yWaZ&HW&U>*% zni0uZ!YBLF0V1Ec#*0609=R2&6t{=&J_+gplFl{stuIdh6yns=Hk9;2#l<%?(UG4@ z_aHx`>v9l%OZ&RZlpu)n*^hDWagQKwr=uGaMzZ-P+>N8yPUAx>2b<1??;d&i;)3~P zWbMR`gezZal`x~u`#*AWhlVy98zrOQ2aY_&->BGH6J%3s2hmw=Cx6*g{ zL|)Ll1UVkVgpI~nS)A?;Ceg+#)2(b&JzlQIh;(;21J&BzkO|fJkW8$K9Lp>J$>v?s z9v-{CA}a?XvGua(>=j{oen8X(1+Ii5x`}K1=V}(6=9{LDhR%FK8bU}7^cO}_S2a+s93{Kk8)JXJ@|=_JVh~M^ zw$b9wG@HoE)>>se>)3_&XlHNrD*04;my^F<8*|bK3}Epw5*m4*5QHyYqKfef1xDN9 zbUJja{{CQ7E20RNND^kkC#&)6rKhTYUgGnLFzt1ihQltY!1kQuR|TnVGL zb<=#G=<&Y~dskN1q8NU;F_yN;F8MSyF)_iP=s5SjJ}L?WK0gB%IWtQb?wunaR%xOMODe1WLb?{tvyWX?2h4GbDmyKqWR>KjM= z8Zx^%yQWBO+kG`&*2h!f(1Y$Yv(OYxbxDm#?eehBBrm4Y_T6XtIr+EZG9A?VZicy8 zjH7ZLBq(W*@W^;H${1;6^gI8*4WWH&NW+XNFWO8E7}#+apnvk+JRQWVXqTi3G7rnc z(T}U|>F*sYBu5)I7xx{j@FFqp!O?qfq!V`NC78EbQp-J`Hz)5IpRwKmcepF`+^w^< zxG2jPieg_z^n1_hW3TomE@kleyXs*b)_2DqZ=FA4)Hh?k(JVYC0XIi18@Q`8K%@*v zEq2=&+3$@a`DcL6e4UMd-CdyTebny!&yk%6pGJ(dsNKn9g1$q}pw_BN>;_*b7KVpf z(WFkgvG2~XO;vGa=XEJ{>`zDPbZ+LE>sb-KSG?FXZB#Su*8zi17-C_+QK+b>psp&+ zBF#?dTLz+NmeJumeB@x!i+4WyQ69-4)cTx+qii5)C2bN`^PtleEpKecLUn1vC<47J}prS@oFuyta4MeO-m-p{a?4@}zs6)C!y$ z*7C#OXAvKJH^Zpmcf}l{Z)8)wfou1Yj0WZE8iq&B(PJHE1w@`M*;|CYD=uuA$r>dN z63JSR#)7=oGI`;GoSYn|($&hE47l7Y%jYzrX*5HS1muM(lO9x=_TMv4_ojFmv^A5> zwzNI=e*ocg=$67Y=pN(aV)AGAXKd{_zP`j}nzh+VsLjIfIH|6zXD`Nv>EBy^D3QnQ zT^UB4rOXP{Rgh5?&Q;@YJF9(r*&Xfr)%UyYZpY9f8SAok1eVxVikB*f+ux7$&QDp_ zf(X~@JJ*S#OFlVy+NqWeF^ny#gzld*6f(i4`FST9W)~rQQCxc`o6K+Ajx4pImmxgK 
zED}vqvHS_H9782NeO=yr!pKFA?ZwcyqYk2Et96PQK?%rGs{u8k>%q~SJt&Y3&#S9j z^SrgS-9U4JX|2!tAtOJe)OGGNKAEP_B6q&Lv^8E_o)LUv5=I$NtTiEw?%|Uf;_%D! zb{{;R+l5{YH(mbCv@p9;45fe79cwkLn8m3@8`|+}$I`~jUBpo+DdR|K%KmK{t0v7d zLl5epyBi-}^L77~H28NiX>w_y{DF77mnR!l-hZum#`b2B&xNM0EYC-&qH4e{(0(qB zrxXiiDmxNw(w+K|kLv6Htk}Y9cfLi(r@lR4_?&0M3DzTl z8bb%;GQHuWQWmlIcWusM1{;_NZs!zM zh|hP==Mkk5FfFDd<=J8C#kTCtxoCO#fyXNt(yJSLjB6(tQbjM%7q%_dBQ<9>WU4Cm zR_24fbY;{Ncbko98>zh6ulw$(Sx*us$OWlloES}%yRwcK`z+`!E@$KFi&rzT9Y0vz zo_5rg_nU}m-V*LBwBnCe_2+OqztS;f$g}EjQ2ykI-6XFd(iIi(xX~$Oerf`P+Jhia z*vloL!UpH1O`O5s_|Q;rD4oWPun|2DFz4#XZZcQWbFZvHjg(7r(ocSM>uq3TvDL#q z+2rI>n7IJY-RHodPIcbrpG|HAnt8v4Tovut(0O8;V;y+s%9_MFP z0(V}w4R?esZH>mF)$KWzRC}j3W&XZ6PnRtGwHp{V$&j^9N2x5NZ8_}3LiCUO?@1vt{z2GkV(CRC zUxMA`%S|?7&*2~Pxub{GRt*ehGk4d_)bI`0j>0|h21M(wf>7RN2xKQe=~G5V%)?Fl zLr~JJ$VrwVBGmWN6H2qVtG3WR-5dP-jbdHSt-pKCl8 z(}=72me(y+O}LN}-ANjxRM9NM#IW&(q^K$XRjI`vu$bQU>;+34w9k**LP_~WES%3i zMk%7|1tm|WUK3kf>lXnLZZj1!7|d$Zl9j_Ps$()c~~W1i*^wT6K^v0vn} zA4eS!441K4{RxXk5xrd-=PD=(k@q-?2$_0dB1OLrt-C;3DW`y?PCv&haRpiXvo?wP zI`DaOSq+Ip`HPjm?7Lk3SF>TuV*8_@tg!bp-WzLA8!XJ~ zJO{Uh<`9a&_P*xBzyJIul-yoPX1#s^9xu<+xA$jCF)VGNLh%#h;tGN4R2R0=3HHfJ z$39KGqsPtKq>gjUFHXZnUYkdU{C3aE)6>Hj6iLHT#z{j@2F2X%1iw%=g{ZmQyRo1s z&j)OlQroj0@=ZyqxKuW_KA)tvV^v1#H1LUBm!~tq;wpG-oO!uWt0csmkModW(dmUK zyouHPafsFEL^R-v4>2lO&fk;1QV!MaufpoW!z0-vlH5Dw%B>Hp8{Y#W4?_7s$tWp> zwEceJFBOL8*DNktvcVjYK1zi8{O4iU>stgKDh7pv0?oq}NNt>9F&tPAf3eu&W{k(sQoSwQK8+G#J z2j5~uwp-jYyE9*C(Y=2%p57A&O~z$GyXOn5b4AuQ&pOyi@NDeG-I5r0<31;XcldW} z0@>?b;dpfC%kWh7o*oHx_IsP)q%f8-a)L0?(UCWD*-4Q<6$pOZjW|fr@Z9!eByh?~ z2UM@fz3VVkp!zt>NMEv_ar5>a*UGAq!fV-?n}yIjp{31q95qVDSVf$Ef|Y%ZE(^n> z(s|kEKASwVaD1H+0Jr<)~3pO{eLDZiR5=8T~fTgI9c1MrhyxzDpzRhiD zK{FdgK~F?$E_%spXwfjti?-vZq)@F)X}B>P!RgOU)o6q?mb4eoJzGrTc;NH`Dpn5Z z$9F_d;b%WwoUFRs+YdNxQInI$*=y+9s1%DH(m=4sSvDUY3BL4N{q}fm*8>uK<(*yj z9f#%o=XwwpeMwyq8yQMk{;1Jhad+;Qf5k>zkEU~Zxz>` zFqR{^JY7?tnnCf8^A(Z%(kPp#OyI5=!I2HGfTC9`Kkeg)-r|B_iJN6zm$QoT_zmJN zCoN4mg`UFptmv=)8SF@q=b`<`=V@N|G)6PA-%^n>?|$BWotl56;y3h(2`bP+V(2mn z^+fWd)RG!Ld(BRVsi`Qh(rU;${P)7SJ4+R0(&UAW$AF1lIR~~xNzzivqxoo~aLDxi3V{Skhd?y~@e(%4z>Nd38?Vlll zB>Uha)OOpgc55s}wl3~--FoOnVi#0~UP^VvzQC`SJ=*ici_&PNbD7H8fcK^!CUweK zRrvk|jg)T4Vo^iKYR85lxtIf{k@;7?VwJOUsyx?p6NjJtmkiDL+U7AO6hgcMxfQw!H#+pw`46CJkph;EQytowsax64eyX$NGO;E8R;FAV)gA9B48 z{qo3@umYEy{WIiXzo`PHAkvPLhgj#xQOt2vImI?Fx3Gy4mGelKBWRS^Gp%R*(_c`! 
zf=5rMF%4|Qz!1!>U4h)xS=6AtzNcxk*fhIIjnnfldQWi|j}{QA)E~JqF88d)pe9UW zwbob}g`Q^Lymj-NtB=pp^F?7xTzh^I^%ydp%GR!H%zRG`^e~aTRjEcU<(C;Y5l!^XHs6vPiLW%_ey{oci-= ze%;kgaBA{f#sDMY*lg(3B3r)IYU!a!*r}^jS=!LYIWDnO%H=8UH*uL?^KW_|EZdjz2BOzOj_yqC|&ipLYdOb-zv*a>qK0`$6gNCFyF})El#d&vC{{gQ01pP6aYN*Gm7r zc+QQ+*CL5njz_r1SHZN0p1X^c_KTUx9BFP!EJvz(W@)U`zRI~b5-47j&wAWBE7g2v zR^L4jxn^2FSU2hFVDe8FVI{-6&txk4AlZt$*H>@v11-ks;B=FGyxoWl)eXQ&V2-<9{SU{lsCT|y_PYJe#mzuf5)mW1G`E9QYzg^AAN|C ziF2j(f>MngC4$oaQh|))TX}MoU@KL$yUXax*8HvFwYKDB{9NzqVmW&pjsED!Q;9dy zXrF3unP5g%Pd!l{2b|oLyeax&2t|JDvpku){#p;N zM-}iQ;g%B(O{COK0v@zU<@F1%r@!A65SCAcJUL%-b933wakc<%=J;IXdQ4A20lU0u zmzfoS!kL+wGmtnce!+&kW@*X7n>4pO7iGrhmuGh7TA#FYzC^*eWlrj3`xpmq8jG}- zpVZlvIHZ}So`u3L><~ky5|7IJngU;|@wz|bog=q7|Cw9WN^T*r5{&Dx=1v4iH0A>J2==G#%MemG`}XVn zYu#_-o{^E0f3@!djC)3x6b`aMlv-V3$$2N?amJI{W`B;k2%jppvF^emSUv$ku-uJ_ zrB^};A=(*_>HW-0)&>0_2AYNY%)^peA?68H*z5`E+S9bBrTgb!`jfujpnks8-tmJ@ z`U|L#Yj(EgtA*E4~QiYYK}zM#lS?M;u{+I;=OPq#B#`Mp6Bt<+va*iw+MinpgkjU&Wx!5GHn z(h9Vv34P%&Lz3-ptjN)A=Fi=IolINIe1!0WeKbid)^HxnhO|TxD55O)Vp%bL!K%aN zzw+zg8+o?ef(cXXv#Re?4y#G9HR#`$x2O!KpE4x~kLxaT!FJc2j z@p!stGy~D?1%xJ2u!mKRESouk&X3c*CI z&vm=C->&4;l%d(PPzOOaEs~MP;#<{iTM7|@aNboaHo&HD(9y#~?DMGC9Y%R5sfynq?pMJuD57!CFYfa%OayI{-yzRJ-=(h@b*X7_oGwwB3)ZB;Fpn!o3FU7>g z*ETmpK7M=zj7S)Q?)z^vXGK0=*r41kM)tW2wOElHW@d$O)G(if@=p|s)&UVa`7bB( zkUOv6hjpt_BawHgyl<<%=@ZF|A-S7CKF|%33o6Y_#M+(SI2h{2>4a@_506e7_)yB~ z9*oD5-|N~$aWX7ZYcle6xPVeI^^)%afq3OfssA98CYv?~m;j)#E{|eJU>idnzF0!6 z2M`3I6M1&xqtYI6!^6*eu0c%}8F;o?mnOPb27#2n56Y`&zMophYtMua6(~&GjIO3` zlzLm#3Pfer)TWeK7DW-U^T{c8OPX%19$Je@g$sgQclTZKk{Q#(EtRkI1z96Epg%k5Ac3KLTF*kqU!709=hHop-cqCD z538=t5jU^DL3}cXYEemBZ2pWpb5`y2MZNV*2M$l5j@Hrv)#8#mjw-Dcai zYqRamZko;Z=GtuAc73z&H-Eug*K^K!?)%42XZz!0beG*6ZSZv66%+ANX4?2KIex1O zPr0Dc%U7@k`aq-E*u#s6e$8NccO2ceadT8r0li*5nw#9yza!B!T2X7>JgCl3kVE)8 zwpYw;!|guM+S1aJ5P{20UdBtiwmXd>yv_Q$J9@c6CUJbSb{cdx6&7|BtGXKzv|H7+p>6eU z6X4rbqYHaf2uKx$YceSgy)ZC7JG#9(c|OUwKB6cKD3ScSgQ;3}m4Y0%|zJ^Ni2LDq1uE*cvD-!JO=VwD@{i6*PR>#PZT zwgrL9NL0nfYQ6~#`;`X@lw5zb?>RucQ`L;_+57>$9RpP2M;@1pDcb>JJoa#dqS?vk zv(-0!9lf85&1u^P`IiG!*<^>~-;Na9{M+(!sV;#4+1(yca?`t3iI;6btl{jNiYjQy%*&!gJnpPg=Q) zIASi4lED^Xgj7G`8g}ou7VV-Q`B{FEJ!(lVcr#l7dpq7kTyr(1{a_=9^q9bl5hGh( zB+d_>frCaP)y~Yp4;Gx1(%g}aTT-15hFYcDvU6A6kvA~ape19ISg7M?fQk-9-J5s& z)ys?D_~*1RR@IGawtPotHCux zBGT#r!mG=_uFz%76tZ}U>fo>ttX%<~<=A3BS(#qo*)=!zuwc9YIxv4}pV?Ro?2>oD zSE%9F(#QgW9W9V!v9raoGkpkfeFMY5fCRf_B2{}>5Vim#XH1;%FMZ2}PG7*!pr${F zsZRwWjFiF{-kCm1HguxdR7yyCV(`wqSv`Wm#WPK>GChN=Lc$ zFXt2w4=-vDNcCpui0$U|z^UpQL+;Cg#X(J0tQsz{Q!I=Y5o5aA6L4?ubOh41f+b{J zi4+)fjcMFg>Zl!aVNF(;%0cwRa7;y-w3Vm%4zI}-OFnI%yNOf(#vnJMe4*2f8NA%w zh|z|_ep`QJ{gTEI7AZ4Sj6pb$#=N9NuzAyT{R?5>nZz**4QiU}#L*q}Cn$iXz9Y5P zrkZR-7@g>>#)bn!cs%8lOOLWe(L))w6-+*!C_?53rEUAGr7P3 zfm82>jJmJ9{`U`NkBFtik4Y8-PD)C(#*~<7j$IGG%id<&l6rS1v240M3z)WF!(Sbi z@)O$L(OR%ZOyZ<6891KOh?5_QrcZY8C*e=jTsao|T+VkLC*G!UHxFqAL_O$GQRBOm z(;=m2xB9GzBTys6^m5eM5$qv@lcwFMN2}Rb zXNn|ZRYvy+p*TRvUH|l%FV%jyET*u=#>QKRhh^S9tw)b{XLWP3O0Kb)j`pY>0FO7R z0{NfdL>4^LYN+mr4)us>`^Dpi{4OKZe4ZCdIovGR;2Oabh)yn&&9tc}7=Oo-R4ZvX zn?^R{U|xw6#{P0!XiTR)lXu=fIQAzTfl=xX0rVdI^1`w=SA%snHh<0K2(GC83I@jn zB~#B89AMyZm-~%Y80iV~4^chso?XFz!KM$a0N|FdSdEXGM_S)AV6|!#g+lCxIgW)_ zJl`ur+Kq2~^K|)kbyJ$;=cHFPYRtqWMs*rD3%%VcD_Ir87^3`+W{a*pS#EF3DT9co z^$Tgn{p1HPl5jowm)hvxE-Oe?;+6p*io|i~4=$jUIu+n=B`#3wZwLADFygLzJ?g#o z;q(oE4=4%kSpo2jyTZyg8-oEZ1N3vR)_`E;OVD#?;hq~eLG9bhZOFarSfHIS@?ISo z*Oz0fmYwBHINQ>}!Q)5v^mKiYDon@9=vH4O(ZoK8$dCxr(i2eX8nhhd`SKv2UtLsW zBAETO?1gU*$eeI@6V;1&qxB%F)?I6?l5X6Xj{p1L22Plm5?@tSHOdeX5ka>jJ6!T> 
z=T$vWI$Ya%lAbLRh(z&sf4?D-@C)BXgV`7;!=U7)mk2mprBT!O+iG@Bi+SUlT!x|F z-c?X`ABo^nl+~wpa5C{bcEGzOfqM!kF{!J>jZh+md41nIpV_VHaO#x`**>OQ+rd53 zPUzuw?0gv4eWY2&%QU(!&0adN&Q4}i5Qk#xdv2#ly@#=mbkSF8B)jz`z4>5|1uSyl zZ!~)b2B{R6v$WgGOM~n4#<0pW_P#YZ4A#ZCf?r%?sv+n>yPwEnNXfkHV*)>xK&iWx zsDNcLXXl3d#3Gc34+r|{e$YD~ib4E=Jv_obMGGI7YI5o9TOVI}=ZJZ`%cHcs6lBpo zG1&U46Smp$8{5D_edqSvjO+7xKI84u!Epmv1#tG5AOq&+Luka5yQF_Ij#UhOrd=q5 zv#k&w9t0$6$Dwdwl8-Nn!g`VVPT2c=4?7hXDQ~9DE%R%;i@F`Tage;bCtYeJx|r#o z<~)({Dr`sF+fhv2=TX?$^r8ccgLZb=Z{vPk>*VOAr3XzPVHKUVpPfyzKOKES&SnKS zyR@aHn02z{DJUok+S^xOjub;{bl3L6y`A7%n(k#A)@vwM$K=zaZ9q3C2A8cq7Ll$5 zHv$3zz;foRE~qWAQtqv)uB#8!jqD>Hn3?&(z;r4aJQ)-0XTUzfIUIBkBQWG|8-5Ha zw8tXW_7sV|s!Of3_oYs~s{U$8gL8HizT+njFMy$2;oXnAXGR2**@X|^r`GF2-yVC- z(f6w{1Rv^Gv0*egGJzdn_7*`NSZL{w%f=87CZ;!$tsI;nGK*+nfaTaWyFcMS?LR0x zKOFDLnF9raP@KDgThv3?bzC^9m1ladtLPJNoU}=JU2d-sQJJ z&C>4RE{5Z<6(sXl0j0>&Tvl4>!`QgNmtgPtmF;5!#OTG3He`5W0msv7$(~i<_Rc<| z`(qL)iS4W55WkRN^?f8h2PlA?76~<5-jQ*3+EK#s_~brb(m@HGi0ZUc<0-U=iUQK_ zo8~EzAkoasj3+21<8kH7D){<3%AC!!vQnRgGloH3FFwiLF=z5QLV7K>*i&287WpI0 zf5YhzXgEFV_df#+s|m9CRA!F&gRjIv@wsHXXJFRA3mfZLX1i;DpcC+3$unTPxqfj5XaB=SoyY2eIs`TF9-s(I4 zFZGsu&Kr(=u9HmTcxbU8UNVPFa0(wUcASS)@YvX?DY@q1n*pvgr(FUoKwG#kQEyYA z_)8)xy9zPNnngCgk8rjjRtoDP5MS2c>VKEI{q<%=*JJm?@}iz53Ev}<&l6;eZ4 zv&XA%{2tfKSRgkkg$`gDHYjF6Y7;F`Lo=^gal5>G$xL3_&kBByR%6u(WWTku6?SmtA+roQDT#8en`6b z)dz}og8~us$z=k3*mIc}f|-}SlFcoW_A2{dw2*b8smdCJuJ#G`yY=RycHQK2wL75s za|+18DJsY2?8sl%GD?WlQ>~pkBN`>o9ryHb_iV8S?d>8NPjoROA@M3<#`Fa|`lp2* z4(bc6X$2p4O{f|h(5HS|ZVGarrd{+6C?HDVY&i5bIA)_`u+DmDsYMbuHlddn zq}bx3uY7j?;2oY_3VIk6648|8t+ye&nkVYP2?!oe4;UIxqH^+)5eS#ac!b8(LKmPF zP^ux2b*Y@EfPhCW4SoH}K8_+y)^+021t^7`s-y4SRVTkvKZ`BLbA@V)&_gVqVj1m= z)*vX(tCBQpBioHSeU6Zd2I{;a*%jcp=lK~3A5(yAQ-Jj{SB=wNP?Q#Wbs+7IIEG{XjAU-+F z^QqmO(bemyrqvLQ=5|kXlZwdnO2u&$$fw_;CS^`LT^bWne@%8|~P*O%{-mf{4QCx?|7Ari>q^m;4zuBz6g=7~gQ%TpcBvt`ETrjN5!w>bkI7qCQovI7k2S z5&d|e!*&-F3&k%pqoe%VIypJ{G4mCVeqUfWh7~jDK#GAj*}&(#ktus3%5oF*R$I&A zIDx!(CdYqEXl*_~C9c}*OE8m%#PSJD_30~Ut}UzWu}%V;VHrYI?H zihedBVao3ueNCJFNa#{Ydapb{_H8YKh!4mODQU07Y(2uX=qxDyazUUUYVq;)NxXz? 
zs0s$?=f2%^ZT_~Y)YI%A!L{`&IiUAr6ipo@?R1YoH~4kqX9_X?>Z`P^?X+aowRTBy>A<<^<*#IArmM#c$V&{Cr5d2~ z3x&$umqK_B?6c_)K1D<#6%gfhUgC zd;WBhL3cngcm@?Ej5X$8PMHT>t}br|cU!-LE@KtP&fhI9{cKl=d@xHJbk*HN=QmQxcg3%d)aE zIu(A;tF6vXCrkyCw6%vvQ0Q68bmlu~`ALfn_l{9;(UnCaf^xTak4xYNTiFagU;gRy9G9LB)YuhQ}eJ{#X0w zOcS4wd<3`kOhA#yhw?NOK(>Fbm@LVf9JX&;Bjyz2*l<<{4^}lH>T#L(xL00)0Jmrt z3ZuG67gv=`++tZJn0e`oaZ~{N5H^3b)w~1wTqjY@v5R43J%S}b-Q;(|VS*aD>_ih% z+&uZi%4T=q-&k6qf?(s-O_(%6`ncDb8hFq$#Dz+4=;DE45Ug!0xUBj96^RHlZfNfd z%0=U3WxJtGSt7eaqU~eIljt2IpYQhkYV~h}(ZS7aOw9kLurTSIj(&9bVy)$WW+wX_ zQGUM{S9atuu_w!9eMwNc&sJqsZQssdGz#vc8+Fg}f`@+_BtnxWE*|$X9pAdH)Bc{A zu8;4izhCKQ1UB~q2!vPRp8~&WGc-U^XORi^0qI_tZKP6|k&j3Xv0weXzFS=J?8MI| zJ4*Lxe8KUvO0mYN0}2=mMOKO-M?@lX`fy8o?wiB?6rDNh-qu-!ySitgOWj?lB}R0t z6Ioh;*-Cv4yN7j-Dc?TgJ{3|S~e$l&V5bYzMFlH z#|Iy8(U~-Yoz|Et!ZQso7DnJCQeorZs3e^S@op&XOa=Trg94s*f*{%J{P#K40^}fs z(&F$fMj(sY&~q8=xvT4a!#>KzP5$^_Xg;7R3jE0r<%Q`W39>TV?u<0zs9^?NKr_XA zSwXWQyDsnEiti`q5W5Y`*!1o7@MP8XbrZhlAB&f6W&M_}XE;wR2IYYt!jnbh=ICiu zn^D&JY^JH4MeSt-IEXjG9X{ml8g2!{ROpJ;_M_y<)iZ+Jo2#b;iS`UH7F zBU!B%_mM)cPl7#39IA~gja$%RGJDvuqFRP%2eEu$7pI*y_g9t- z&>US5wmyqXs*uKkk(>}CRJcAR%Zo2jqL2y+ppZ9ZnO(xEmt-M9laY6!mys58*@nuY zcJC$ON)*nH(5@q9trK>P!Ta-;_qiY$DUJW`n{Ds(o|jj$!L?SGYY_ikK7I>ALe&)6 zN7xTlc?nvC$Fn&V@8d~q*_Nh)v|;a`iVfK}6HDgCZ4KBMh!0~URneA7MDh9dwEB%9 zQ3!I=S$v+0#d28K@bxb?&#zRN_VCD<-ctuTn%uVEwtJZt&soHh2uJ&*tzXgi zHyO_9up?}}Y#^ZRXS{WVrK^w?Jw&hBkLAiFu>1qzZz>(4?=KL4+M$w0F}`e+5jYi0NbqpmzIC!vOKx7HC`4Y#|?fP5cZ=!=)G6e^VY^|!IXv#K^{&kH&?wh=ty;AeX+*nZx-$O-tV=jS!Tm3-zOnoFP zv<3glzcT2d(7&}OHomD*8;ssgo5i%E8*{goZfG1h4xfIZ!JJQQ>J`@$9cFX{M-W>4 zN2$fuA4%)l;Ciy=ocDK8wPMgT;?^udAW=T9jZhzj351Y32dWL_;n)Xcwb#(JhGAf| zNr*p`SbJ5KtX1%u_i2qxjH+nAc8?W2(RmG%gD?2fO({SU9Wi|Vf_ca?yd-+q!?I3i zWk3{#KNM6Ssv)j_(8<0HPG?l%AKDToVzt`m$1$@K& zx3|QvVMYnu-k@G+Xh_`?=WP)fpZ(qX-)coRPi9NW?v!1VHp8b?2=mm;62GKV#>c1c zAurdSGqBeqQbrjKM?d5U{Z83?ZSlyj=v_>v+$S#l@B|1vNu&{3kCDQ9ZX`n;1;Ku^ z0&A{i1LK#GK+2(=<7i1bAES}Sf}_BEy-_>JVUr1B{8-5;lAzZCe_~7>W9V$)I``M; zCRL@`O3WU+E%pg8Jd!dO$@ZOrz|I2J_IJpZYHwo_C%4dcZKx4k%ln|E`K~KL2^76rKI!DF2p4J(35wu#6?|6UhNL z^!twK$>%&jmm;XuQ{?M-kg}yB9~^d?=_(9Z2IWJ1SY&9%Zot4cCl-Cg>Pi{v_Kdth zfz-%|m4Az~DcHB6mLe+ZLC+e<7vG|p^%dC7tnV;MmbAp0=A%AvGnc0k)lA6}5gVpB z#|-*5cX8Hl#y!mHYGY*$0@9C)(hLDOmH~*)@>n{!`Gmi6=BAocxAUIE&)3)kVDz=} zA`aq+#p}lic&;8+MApcg28S(8##}1Zz1rKpEk|_TIAtttY>|{y@*($JMT4>$BV8nh z3x3T|t3E#753cd|KvIfcd(KmvHbubVaEc*G52^aN_m==rf8v|N;v#WNHp;eK z;JSGzu;!^!YP%o*0RjM^aAX|5NhB{Nr=$>J(EA1wl!aw8;~HRUZTdK~-KS4jW6&HP^GkhXFd$kRP3n z=_(TTCr3lC1hPyHHhAtS>_&dVL>pSFKN(r+)UCLC$DhBDVR7bXL=`fFTT|c$YPhEaX1b{6Nw0~XJDTXBgDya2qJ*lsWXP#t`d z9z1cBo&n}j?KiE7&8z`>II7@lQJB8bYw z+xs}!pJ}@zr(7a2B@Pu-c*7;+Gc&HFR*Tb5;7fYT!NWM0Lp!;tqwZ-Jl|!<0Hvq0G z5t^3(y3j1YHB1JbZ{05bk0v+1O?KrQ`@#DXW>CNh0t3h)eG^NV`j{0~R6i5{=)ZTe z^&JIJMLWT2^Kj}D+>oEydg@PVwh#UClutr&yjo}1f8pS)D+B4~N2qjBLQx=2_MZ?P zm9t6R+TcZ&l>RP-5!pk|;AI#e;Un_nWZb|Q?m0~7y3;nf4jO7?kzDYSfeB!y73uK= zH{q}omM@!i_QM0Ynygp@2WWasxep4sZ-|2yV4(QwJ|K+$y??NM1?y&0dvjno5wvX9 zR7!i(+B!Vd6z#36UyzNS6_U=eiXq^@OOfXffMr>;a(8*4Bj3&eop?5c1p@;^l-A`+ zqwGCD3LeWBP|*YE#JzoTfBPQeg3I>f%!ty@I}J1=!&RS?uK%oWPNEr zBZF~6gYn7mp_wfAgM)j%?CZ9Lf{4cMHF^J>#$ks~TmA(93XPEbtP*!kGd{n{%s~n9 zT^S<=v6Wxzn?g{@*%XraO#)l%Y}+vlZw4Qzcuz4Yo~9WV{0fGn6W-VP`&A}tU{Wj$ zZsC2OTcuz_Ss?DJ`KCSfIi)Wd3?^Oxo<}aW-dR!1oVhT zoOT28pEul}`LH<;26s8H_#YN+VClXTVt!V{Z3D@shS8erkVd{Fzkp>ZAoJy1_46JR ziLL;R@VI%F?qzB?f6Aznmn}dv+JecQ;{rkDx{>4zx(Vq6;l=!2M?qg7cLmE%^RKr61!+*_xYlt6qojsHuvw~D}G#Etg8 zOI*v`v@j`jE{(gVB~&ViV)Mssaae4+FC6CYsh6$gZIWR`zYHqtO=3gBwwR~iZlm!R zlbu4LS|;Y7sQg=2UMnB#H~0VBxk)|Udi@{6t?zTr?&bq3J-yoSx!k^AG}!dX*RXuI 
zA}xIS@$>c{aUpB)BcgAGBZr3#UjKsOJxziAw_AmZy*oeiYp1p1cY7}fy1aJ|MYCQ5 z-MskI&9&l-0BdOmc(YxWR`0ES`fb>Jh{d`q5qE=}L`q=(G2M|1dlA1a1q7W&U%Mro zToZk>ZBA+U*OvOZl%i%1c(hVWZ9i~dO}@aORbmmHQJNzHugvYO+2t}(rfnW_*0g4| zoOf@nNiVb(U4@+_l)y~}4sX%u1>87(ILQTFQMgR)f${hlX3=|bXFeM-;awc0B-sZg z7sB&GL0ROjd@hUpPI`s$SmSLXrNtO$}tlXoQf;FO3Jow)ij*ai53;rg0IIK z-aKTnGcavUeQ728rx$mwu_lrAxQ2-jPHAaag^LC1II(Mn%@5w|^IB{6U*Tq3*5>T5 zMR=4z4ciQk&NwUPGh;SO76(AD0PNL6=<#WL?0DN!ScN6KZ+9m`#eP0{xd6t+gmH4t zMCVO+PvERsp+4B#fyq{zdQUwYK+ZISLv`t-UCvM~hxu(T1$)7l{XVDFo^8)L5z*>1zGCN4X@3_FdOfez? zN<=RW5b=u)>FYmXb)Q2Rc~F}rqyDJ;C*zEn&Pz-V{$Lxqj#|Sf>-F+9aU)}qB}j#M z3Lb}FyLvFkobJU@>HOBM5g8X7g`zUL@7iK#2z8V(U|6y+Zt}O{&^bk%_oOm1Ll@f!Om- zsazTsY%^iTd~)A3(#DWpZuvHL*cSEIg?f6Jlp@&<>nGIPCJWi$o17|a2%6bgCvqUM z8iBRs1^n4HZs0}=UO4IORdk@NAoHjm_sbZ7kIx~J3;*=N42#_#aD*Y!R?{MUFBy=m zS7KYJ_J?J7)~^Mi1-Hv|G)KuPc`fLp#JV$QH1j&qaOjZp=zs9Xa=x9Pmcda_hfJ%M zOTo6PqXOaiV|OOYAKDud!{{1ODBpvcCs|=2ydGFzR)}L_%y+%#$~m699=pxpgStNx z!~_u5jBxUKi{5?rY+{$UyR!eT_DB*~YyOnIa=kFWoBFiPnjX{VS4Kfj`KX@pqpDi5 zA=udRtfW+4Y!A{-xk2;amcbZs%kH>8&*(%UQ<9T+ z7Bjm<_MNIAA-q(u_L^C{&DFpTVN25@ZW~d1jBTvJlxJlWq()mX9ptp5)J(-f=HGa!Rh^8)tHP5Rb(Ek)_tWU85p?qGd< z)|*Lg?D)3_HuNR)F$ujsSX7ifY{_-cG@dEy*tkmfhQ~&|(LwpQ^h?fbk z0xoKJkn)ENS3*H&E-MRk>I6~Ce&_d==VaZE6K~{(cpvQc@HaF{Rg%7)J6=E$Covka zUJJiBhQh=L9SgQz6u4}i_~-==Gk-N4-nM{@>qIyy*#)Lgk+prB0S=$uz@&D=IV{=D zyxs*Z?69)wn=iMS8z?A8Az(+6Vp@7OkXaKCyjnS(;Gu1s62y5u-2!!DnDsY$C43|e zJC@Av^{G4sU7L#<8c{qLXvOtHd_PAIUI3CHQm7xgDWYv$IA(XxXRL${59)NPNMRa-3rp?y<%^TWyyB6M`NGe zD=)6{9exso-t}9PzVBad{7(j=C<2_hod7!7>&YP*-}S7}i@RB{w21~&yHy_vFB+PT znwSOT&IV;{?LOs{bfjv(jf~EAAGCUtTIQsx8hxTFc2Cp=>_*zJgK!>DqwtKV0Lrw*EOlx9-w#QAB57f zU(9C43-TT+&&<(0G^oxHui!Zx9G7 zp_NT$#hnbv{@eZjIEOSD@Ge8tjjR8fQaWy!}GnW;H3yR%>nTEDv zq}ZmX{5&8gUmYYqomY$!-7p`YdH-=?!TvK=3+B(YEoGMbQS$40=bMx00P<6fk6PSy z%+fnFxzIm#ohj*M4-4RP_;lH79S2WS558x*71?MN->K| z6^DT$+4Y{&rn6&qH?F(S%%H8MuVo-{IE5Vq+}b_ ziHVI4XZ|}phpyeck&l{!$LQuGuC0pKn?)hQ!4!Tcn-hG{=3%PKjjcRUiXW}NzTAdd zCfi5Npn5dTsrKPl0B+nDtf@>lVDIdv=jIWbv)QTaYdW$f09+8L3@}1j36H3f{%O6q z;m^NV_YGwz?E%BXNiIU!qhshH1^3kgv*e$kYmHWUNvZ!sN9Yn z442`0tvZ*qPgI(%T`FaMLQJJpX@fn3w4fXv*u1pprVqQsb4L@A=^iQ$F57NAa1zBm zui_0{8#EsK;i&|=SV((TiA>N#LXF!B>N;mEQyXv?_PTy_%`Zjdrbeh74w~MX`Ybn| z-JvWmat}qHGp?UY!9Ga1ivzR3K*L4hw(YdsfOHRqU2TG99GbC2*0Qa6<#YEPte`Co z3x`5lG)X#kI1>n)ZWu)qqJVfPAf4@~F+0LWm%Hhi(MP)B>x$sdU^c;fCMrW3*i^(T zcresVw$*q~iD(#4OKrGrsJL$76AY&O9gve57yYgDD=8a(S}li3+NA;+jBCqrM>658 zGKqVT6Yn;OBZ{<8PCi^Ak$8jALY2um3>hU6aqrLg*#3UtS0(|WJXPQ-DreOi*??{i zocZ(6J2KZrT~DM3gi9VY$h5OXKnC;FHSuFtE245w3bt<`W$WELoBVA;Bv`b*acOcH z({+TsT}`UKX1))N4+4%$cA6-4NIC-;YU^=-*Ecmgadh9b63A~!@S9v9-_ga5*_&+H z_9$>8kGOhWHKb&D9`^paXKBDtP6keSbSHaoV^OWGV2sd~Iu)muCC7rSC#@FyciWS( zt}*N+5mEF%d+n>DY_G%n0;%O|bhtiv>LW~!b<8KeR|T0ylS`Ly;CZei!IHUfFY5|6 z(11iA+}95W#m(=w-{#T2p|42XKMQ|#Xzp16)mVAFjVamgynIa zU9q=sgHj~svn-62+RZ)<^MaS!D*#%awx6dP$}sOp+HA|o6LJq8!C2M;WD;OXKRF_{*4nt>Jzy;11=kG`fgQUySM!%mZ^w5fb2TMws#%h&03y|UDdl} zuFF1z*qzd5(FKU@9WSXwHqEafz9ptuXjLkTsBP(k(>` zu^SVswTpwIA2JgJirSVVw9if@6Etz!y;ECrIw8fJ`%OLn{!Y5ct3$4P2HQ&RIl@Bs z*Pto8&${UZDJpMWfFJ%c7bjA4GN$LbKUkfSb9fhOd7^I8;YhsjP)LiK%^amwVzskK zdC6Sk7NGrLpEUccW@__C?da5Ja~JZeaf~ZdeF~l^&i;~`p%-I1tzMNjxbi7Z|9{6{ZD@6$GRO3GU5we{ z`Ap!_K(tNtMONu6eG@=`5;`qVMNq>A!`LxZ%YPo( zl)~ko7=hZx50b1fLvnx!Teizux;}N0UuDH%<#|OIFyR%pM1BVySsHKe$>$l(t!gw8 zt5shy-fWDWVSU4NF+TL|Ge`rHzby5b=yA~EmKj5h*G+{VS1?}%nBg0rEJEzoLcje2 z<5Zj@_nvOAo@cF$O(Q zzp)JZ6yBPdnrAC84&1zekvP3z00IRX*pBe`t1sGCWA_s7Ipw!MvV=S+tuZgCGff1V zIB#&fhl;~+o8!H?`j^{HP8XhAQc1*VMy8#|VCi;f*>!6k zd@Gc!N9rP5bfG<*Bi;p(E#X5`(1W-p@48z%3>*_0G>QOuvsH*%PJoG_Oa@{ya3#CZ 
zx%N52v;j_I&9O7_J`P+}4bR1eii}6jO zGeYlY-XF9-ihLvl=$|N`>r0Us&|`AFW35ngX{pZ)z`7}-sGG;RIT;M801VB&h z&68|&81H?~X?I{7yHB@C8upJ`>biykKUS=^%4F9WzsRUv$teZ=c$6eEyT6K|wAtT* zOhb2{afcv{IHqM-^;%G)hEW&lExRIkdw0s=2=@4+A@BZ-^r(hpcbC zlf=!23d~E#&V~x!Mn@ftMqml12qSpTvdy{`yZ@bU<~xUIA0ft3n2vd|iLFXPLqtfg ze1?}wd3*y^k~@3z{H$K=@UCZjT%P_HkF?&m4KF4qY>t^#r1;VAxJjr%d)unpySovE z0C1-EA56Tz;i)hMkU62#|0<{~HdDHNYYNYgv}np!(%P&zyKnjJNWZBU8{EcW`g2x_ z*97ipAO)2|BAm2t0%I^&p#0RmKbT0p#k(G=Kkc1FPHp=Trx&10aZRzv(e84~GUNg^ z01sHFZ9bPUaF?nSZnM(Cgd=oLTiVAoJboVnoRLidiYEgdM3KKyz>qaBV{PtnkTS8s zJAGf>9&$2r^(2oNIn1_#WvKCjUb@Q?{!LSQfMNgJoBk8!YJW4v*)@v%MSQGv5Et*x z1WZp)&l4nnbOa^7^!4wyhd*0e5*jgOe0!*IZvq2Ub=jNn7uZYhsA;Wr8vg1M2}p+b zhqlB(;lD!#kz`Hz<`}wFT<+Bzpei;L_26>yzHX}BNw;VLgsKn`))WJ_02@38ViHE1 zPXHhdWOR7rDUfd3vP`t)xyptoxH?2>-Uiz3%k-GID0k9I=O&=XoZXo3&HpwmM|Ft9 z$;u)Q-`yKxIR^zf2svleeO;o!r+^qQm>|RqnvIaG8rKg)HAF{NT=sl&5z_^=eBe{Y zOBzXJab5cegj!8;I)8JIV~?yYBLQ(m&i)LZP5x{bdMcE49-2EczTbcsA2wN8W^MU62TJQoY`5IMX8@q}yV54NI6L&IV+K%}^L8U0NjtqqowPvmMED?C7W!H) z=VhCaPK%PycnL69Br3hD5tZu22?tuhq#vlX6z5S-+G0X<@-gwOKpqd0^yJ(Z>IP)* ze=7!d5|=i}A03|vm9C=R^$_92MynbJ=i4!AUbcfcW~B3V+Kiz_QuQ-m7pG2m!h|PI z57%9;^zT1uKUZp}RH_-(Y-2L%R)}Po>>h+yi*lV}aFjD?K6~UP7NWmn(=(R<+h407 z1u)RR_3jaXFV@uccl)xxk)s=4$BPSBRl{@z!9KW$WGj-&QJl9LK+f~-i!a=VVt`Q> zVDl=RU3(Rog_;I#?SVrt?+g*(!w(B>$%69M%Ai6_25pRfTxS>^u2G4q#4N<>HE1|p zM&m&x&nDVxmo5GJdOEPSKz7CCVk3f394r&;kJ?=ARGaC9sV@W8_-l;PPjH31T$iiviOZyO? zUUUkt#)_MjX?s$S_hljWZl++pT zhm;1>1!4+fbrG7TLS6G^rGU3?=0k>&H(Oa_^=Cu4JD^!0 zDUp;lMBb52^VywTiv5HIqym!=m%5`S4>a41m~bhJT@w59%sbYiePXA2YC*%*h+-UC ze8{LS%A&;-bh}xgxK%!-95C#rI{rW3hhB+QvM9agU@zd9sRj4EJ(hY6}l> zm3$y4h#v&)KV5?75#(88JT_ZoTfB+Ms<7F=dqD)R!N zp(Q)-@vXd|F5y)s50{FQ<=^VANuDlw9I5@!zLnnsK+em^WG7!`VY_T}X_K_6QuhuV ziVnj2UFtsrNST>?8}_cxVuj^{;;EHDUq6r+!&@=0EAsK+!KQVE_9FOyrOS*?@ouZi_h)e{T>oOV7!sn_3y;Nl44Uxn! zAu=N|{8lI;80hyrc;)4K@JcS+(M^(SfpiGP+KS{n(d_DaNhKli9(;0ue761O3R6GM zBP8W)W)fj-_4L?hyQjucv`G77fyDDz$u2)M{X5~85UtbO(|K6NRlk`a{iC>SSOg+| zm^(@ZaRo`zjUPHc#d&F`r}rT=<2Ic`QMG`IU~we4B|n6RqWl2#IS)V`0Tdt$$f%Hj zx)U<*)%fIXk?I}b@DTm#w(B}8BX;3sa8@-+4MSO2+9X_(VEgdnb+{)VA0 z8}&0N5{uf`0D2fXkRF+0Ca3=#c7+@g<$bvllo}KtxP(Wi!DSb6u4}A@>Nwis9>fsSCA5C|Pt5%gJhM;n%Xg8$3qF$ep;XgYURF)|TR02IH zmDWhnt%~;2|L$!Y7xg&84a&{Y!=)E18C~6ri*;^_acWfu`gJSV7L?^$I%)`KFL|+( zfdNWhlJV+jGy8PXcb(MSTPVMABH-2}mBw|2rdNC2pyTnz;YF=C{L)L43Kxf2 z0P^f#7(=p1`zrQ^6<$Hs7cQ^Wo4C;%GELZ*;_TSgPII!i@uXbe{6y{54{)Vi@-tb1 zvI%aBg;-(8V6*aZbGoukWQxtR&tk)@A!hK&67!Pb+p!135H2Oc?#9kyaJTPz&ga>v zd%K0^I=nkM$^ent+pCc@7RsNu+u`WCpnRwl&Ig0RY5IYz;jZWPiI(kid20p!bE()G z7gVf0xo1XN2qpdx2WRY^Ny}IRFBE2K0gDj0vgYsPW&tVjQFKw zhodD&buzP5rJ=M&A}eUCOkB){QA>nlbBhOf-%%*GFpT*f6{ZfQmhMRTZn5#Lce!tp zGjD`lrdjtO=8AJ-e;qAXu93(NnBS!1377Bf#zdi~#kTqMvuv?xLFbz#IIbX$4SUf; z*RTu@W$YhtF+4aWBeK$I0FW-u=W%i8qJ_~mD*06Jg`C%6vC&BmP;tq9`w-jBe14K^ zeamy+32Fk7N7uUr5aY%XzZdlo)Q(#K0={r_SUo!@$kE^NFXRlT+nZs|{WOx^MXQ#V zt%oYvteasa24Q|__*S05^N-4ca6+#F=?64uI1WyT2P1>Gz8~|7-n+cnSa`&k`|*RI zd{d_6%tHoo+e|(3rS1v&z1;h-CH0x=2Q{9JacaQt9L~5;fb7FLlq5Pc!5{i{{XgNd z^H(?59tCe_tw#dqa9Wf8Xj1uMT}iP=8J=``wZxGXkj6~kzhiEHXrS9fyk&8KvJ|() z+pkVWA=bDV5f(ce;o9j4X0US16AUVSfUeo08Ozo9WPh>^ICE?^hUJsYiYOL465n1W zA#L(FV0*3NA*t5w_|oC`&q&OyxQ4w_M*z>_S6xPj1XSu2lTbu2hrcRYA&=2Mxcsy*bZ-d~`x#(x0Xp#wfCRBI1?*`Y66VwgaDG`ON3 zpUQe10c!XZF#D=Gc>52x4|t_HfAo#2LH}Y=-hif_sPfuejcpouup+C?mG8ab-K1fl zWjXs#c0#V(0HGaI*ZWMR<+;Mi<`N(QPE2Hf2f?@PWP)#1Lg2s^nF(Fgo%1Lckto#6 zPY{QjXS0z-XIQZjn-rwEtK3}WbkV`F*4%*A*;^rz5Y^Q0eAUVcHfMnAgT(Z@N&9O1 zOCLfqWI0%UPRMY|RW;Kn*-qr@nV7@1spt2hjhy{E^sXrH_6+4-F6H){?%8D?^`-D? 
zV^9&;l<&}=`k4nV@g8XChw!M0n^#APLj%!W+~BZpV!`qNCFpvq+249A&dbmxvBu7$ za!olV4mc|{nYQ^PDhvwsB0qh&51+=`2H`LWbN|>j{Yz8>3MHSB$28RvtN)c}kvEqA zEARX^*=Af4I4-BHU=}^f_ek|O#bCV@7Iw@m(oqmamybceB3v~!1490R6pe%!Coo-O zZ@0OwW_V!-T6_sEZ%lNImic{MVq6P7KH!kjPfx=bc%qOq@dDu2QK<2#+3ivG(=4`(RJorNVJe|YW?Ul&+&g$onvF1UE8*!#DNQYHYi)(YVp1L1SxT+i1*5(&(Aq@2B@atT|_`fXNJ` zrhV(_J-iDsz(9hIonC(OnDgIJOz{+^B$qx`JT) z;ANhvs8&eyR24N3H{T$bX(x2r+>jbMg5yfdc^02vw~u)DEn4hnJU)8t)T{;O`_)+Fy7?)RHH&JBb-#T9=;MY9su4iHi+xgf(aNDdASTg;i z3s&4J|8SUlI`ekpSrqClF^JbdW=fEJ1_WZFvo*y6ZoXIlMGxoKPBl;i&35kQCG95) zJ^)x6`|?i_Vui^EaTEJZHZ&lI^Nm&~?K?w8;*85ZD+^2B3Ae6m`tnT=l)CC9Ao1x8_iJ_*nd~GZ=C1M9XFYGoZSGXp9WEg?hu+wZ*!F-q_Gc)8 zE6<(#k&v^4fEDuoCbj=vu~PqRkxSVoi>@TvR)tnIsw=M{ZHmIfhFi5hyvwIfX7IKA zb`z=4v5CmxaRmNpYym?qR<@*0wy6Q(eW{laV(d~6IE3g|KAs9wt_tqrug6@`quMPf zb_|(7g<1o2L+#~Q9IpNd*IZ6WoTh4bIM1{?!Q+T6#=98vQ^rOUZASHexB_BZMQ#l# zN_CK*vEAp4%#OWB4-~hDTu4d%{DmZTQNN+gOl^aQ6@G;7*-SFDXPj`0M-rmY5}PV3Nx;el(NX z%5E{OT3IK7e$SRNqbH^`shNn0q*8mpFG8B*EFd)bav~g*N0S&d%(CxGi%+U{Fu7X% z*IitCzgX0CsBG|VO)p}$ z!O))gT#}RW=p21$#fwnD@WGQ45IRXO2Ji_DcKtp@g>dAmY4K8dP7D zyuHO2_L5!US`Dac>TdeH{_W#2#7}UUz`nVGTCR7t_;Ak@Z}Fk@<9}mS+#-V!EcpFq z5E}(ldt5+y3yX(Qaw|4|9FC@_Ag?x~Z<@$7q>H#hsdirND4Bw3iET%ov?tFJMMt$; zYnJ#ahf3M-{&5OeXM3u>ABu>5P9&))KNSO%8?qRyQw;x>os4z)iJb~LqRHH8 zl6hbUoY2;Ah9i=B&-%_^0@bR59=bYNmiPC#doyh25;>1VBeCezGm^&S_n5j^`Mxto zp}IbN0lL*xi**BCg9pePqYWCwO!N1<=z{~n$TY!K`ZJ9HBkgKr5a z;f1{R?S_gs$Ju|LUO==PKK|72 zZsSm-g;?dG#w;qw4(ucr!(n6VZe3?0;RJ!j!zcRPon+31n}}C3L6R$T>zGYY@GROt;2*dY{O zaz=L2}W>=94C9X-XJ z9(S?3zgyn4(yqfw$b$Y6{v8w2t;L6+T^ATA#MSsg#H3Z>b88*nIK4fH3QSVtAPEyI zOZUFP?`DUU?D$QSDY6x)G1`Yf+CJJvzT2t&m7>A#sZtQM=NKUtNi=6lT^mCasdp6X zkhmrP{dv31e>g zudCd$Omso+@vy06GFKQD8aCa4EP6n%KK*d}Z`#RcrFblHP$jP(v1OuUIE$f-3K&)< z@5HQOE886<;Q6?%*NxhDqZq0mi{1A zBMtJ6#+T$idayQvPe)Co2Yc=4$l0yY+9zfqE~TF|W|ID{YYn!jF-Yog1*IMOeY>MK zr7$9#Ho@#A_ImpZ!;fz5)5OXsSG|>l6h>TP604WImSJw(TLaOXYW+cP`y6;bHM6Z2 z>5ISRvAWtZ%B@^Jx_S8!{VOFkG{Vg`@u`oxN=cA?i`dEJY2J^!IDAqkH*BgQX14+F zk$ecAkou9aC`d-9l~=hLI~Q~l8sE~Zf|+|g%?&vRS@;H7GWCiy##Qi4{AR$Fucn<_ zqpoRRUB01>zAQBeg58=Za_cRNYR$6Er$L(YOQd}}nJwaI2{Z_6M{6HMl0~#RMwPa$ zfhof$V(>HieM3b=7a5BCQ_)?Adu&aY$g}TQq$FFCO`Dwwvw64AT_Q${PXp(-b;3nf&o85NKRsUWYAl0n2tqW<&(N;)bn~dw3@{^KtA^*^%XRB( zIHAroLXk_zGWcI2-POR>g}G$8!MNhyC1WnO-jf+mzLjS)0EXy zvZj7lhzr4+R?3H*;k%vSR1m|ko`2M?T7#BeZg0`` z=NNtvJ9;A$<1MjT9@ch9@GHBS{b=Ty)_E|^1%ueq@1Wf17~skN$X;5t>%y-`Ds{2X z)0r(oOjPodpe>U%xW92m{Bx3%0-~MpSb^lOcosj;ONErC%+WfT_$X1)qEYWdHr4f|l-W1U#KPGWEO4;_R? 
zg4uL-`S}>-pGE)ITfh-Y3ka9cu>zEz@O(TUpo;i_)s1+>$W1RRzi0#q?@4yv6ta6m zo@L9-LzYaa+pdzarxez*Ifv{2X2+*Pe97@1CVa;ZL5*E7g~)FDrs_!v5f73x6Wg7L!Qll_P6^#i$(GKx?A^ZGa^QzdLc4Ml|CoAaS`N3(8UslO-;zbG0( zY^)nSy8=h)K1`a(2(ZhD@tZqb3brAq5t|!)Xqq_|Lhm3KpX^cqgHg-s zXyR%wXzjG(K(qu>1CknyICMFmZ3(GQ!iAZkBe^S}lNAR~LFTs_X^w3yHgpUy1M=cQ z_M~ZMeA$0tJ58#DUH@L&LWr;Guloxj)l6Y)@U%z_ZDrHa4gu9`Z*zWmGt=rQBDGDQQq%8I)TQ|=)wKS)(NW)Ra9lJ&c}ejZqKbW8=W zhDmaE{xN(yWDTxjn{FFEIXGkqSGv@@wyz&_6J^G12Ah|fUWfO<-lUVN-maFL}4jo>X0W?0PBRqQTNtI~a% z)Gt>dDJtEZrz$gb45ew4uT6T%)u2|qfKAE!ZMnj-z}*aV}K2i9lT$E-LnOv+3G60F1wj$02bDY@lW8Zv+$ z<_$R;D5yuH;%Nz-$;ih-O*QHgO&WtfxckCpQfyk{RxPUo(rkgtTBP3ISCo)yqq%hfW^My?BqovH&#Lo) zLztZfxp+f4oKC)ODE>fLQ1qhJ#QP}OqPdb27IpnFvi10wH_6niT@nne8ba!j-EpJ9 zKrVXEPxkMtznfTZyW3?k`1y^hQ+j;S8+OGj+k+B!hDNV~E-x_bk#F zOH&LMcjM=}cZ|@?SG@8KnO-nP$ggRleoIs#PS>w<#>2d#AAp_t++e!LfqE(u*nUxS zFYmsYl`oytd|ady#%oBOL5bX13#%4JFW{3usT2R@EqPAEQkbGC zKbK-7HplB{$(oA zq?;KBWM~={WBJe|&JE+>|5mPt*7)mvwDTX@9l=wHfzTdkuAAsaFZTFn|HD2y&>ee3 z!FxPs-*@M~T^2L7mJJ5~VK{IS6TxSgC$iD$ri4>9WOH|$ih4_jl>h08{yQ0&aA#{E zt!ik%TyJq2n93KP-PmvpunKDE{8dy0m7Se^>dVR+*_O#2;s>o{U@&)z2)5wj9>I3; zb1huaoomw@jZuC4-i-M}0F>$iIAuuoTKq08!za%{+E~*>)GetfvH?aC#=?~KU z6J>3ylg~uIj0x?mBZT1XofFi5;_GCQJ`&sKWuJ!G!qmL(N9z6D16%Di7B!#S01Tzg zcESY10Ql4;f@Twue}FHdGxhK*mv5=2iU1t3d?UdN6gFkdDb1ZZ_{#Fgva)4Y(3ZEh zrT_%tV#sx}hv0PGZ|0C1ojFVeoueHhCrZCt8!<((%ClHORwBygC1))|8tKHq166_4 zV3gCZ@sdJ+tkdg;=RL4++yzpNJ`x{f9Vc_Fx-Kb{`@Uk1J2wltJRQ?+nSxwsoT0_O z|Hn%FB)~g%`R7j`1lM)F-1Cn2@iZc?&A#yu^Lt$%!BH?pv5Zg0~ zzJApHQO_dr14EU=G6U;Rh5y`pZwFC2T@{$KgPL)W0E2y8oWBvZrOFIgEikA@|nc-O4sH<7fEz$b?PB8ptz zL)Z7b_FAYWucx2k%1`qpnH156v;cSVEDWl${e1>Y8?Q>?uYosmtGE&29*zVGNPf+} zxjv;g1!I!o4z<~K8T*>h*#QOBM*dkhLa}0vU(?G%>aAV)bz~KYSCVAa4btBlY6?R% z*;=VwkQy|jB{QkM|JXGuvKQc933{>Wz&--D;>A9s_%tnah&L#PF}?Yll&uht1e0Zw zM=o$sY`j3;77HV~u>_gN%s<22BwL*9KTzG#l2Ej-^FRDs?W%r0}e+^z(4AQZD6b2&wt9P1!Yzp$KPr1P-`J)gH*^>;-K4SSGXbXhDS)&2nppEZuyoyVxa~Fl`}QR z?ze$JB9NDC!>#O6bYb2Yy?{oUf6e(ZD@R91brCIWRju;2sL5GalswE{L3+VrG=CD1 zuS0t(g!inZ2(ih6pD5Lvo5~|1$vr{2{hdg7x6jCP@jmD3G`z~38LQ(KKhQqMlWLW z$Pakfq``5+Lp7-P{Hu!y3)I(~v5}3qBBSe456+(>p7*RB7~4fFWdq-{1Qs>>jte+N zh&>L5w`^mN)o*g|YWVRMnG2U#9Ham>6x5-wyZcKBkkp|*433$3gWPD9-P!sV<8C*n z^*{hf*Q1tE9kBN>ZjWi=(fKY9!lCcmyQDh{MS%$mO(C}Jr-RPU+PbFNW9kWM|3zwI z%v%ZO7*4%$ky#2j6gsZ_U1=_Bjra-H30S4zy8n3;5+#rqpXg+iJ?X2)dfxFzSKy(r zeC7``qutFlMQ9pIBt}WtpMx&n6L5;B71CcaD&WQ&S&eMmr=ig@?wrEZFA31xNl2&5 zSy$wM~!F48$mkQFVPgy`eEfrlx~1i=5>w%^Oj9 zqY#5Lu;tpo0gYu(oTUmwD$W}wGW<7__=#Ldy5z>bfiDhbDHx8P0^53eCr6_P5qQn$IuZ<5xFPP-yiA3B0v)*86NqP*>kq}DT z7Q#!G8-#3mMw8=l~pONwNjIsJw-d4wMG`joJzG$b}=h&XNF6AXC4dRPBZ zlGy!_sJj?>O-AKbYEdyMd@k0QleL%>({=c2_~7miB2dy+7&L&X0a#A+HrWd9DRVf; zOG^E+${0Lk=bci?L)K9o@hH~MExnGzE}hG*=MDwdSLIAA`Ktu}92e0#sc-B;*&=t2 zM!g#;7lHKFGSC|kjhbNls`c-7WVT{|B7=XPOoY{xWV z{8^%!jw(_5>niMf&ksLssM8;Z&ydDLOiq;(*Nd6)H7SciC!WmymE=~Ia0BnPbePea zDFn4>i{U=`yVe)EoOhbgLRx#u=GyMy$CnKOs`Yf&>4K>JJBmJv7n8n--66!yTON^_ zD#saME77cRL>AdH%Ht(IO^5O?7eK{4VC>9#lQR3q6!gfms|j!ZAG{Adzi+c`>{_E7 zI#Edz>BqY~2|0Zg9_IphKU3gONOfj0L$2%~M zm0;akneKXFrj+T%S=%YgMb(5fuH5v^usgtw&HdWrDgNw4&^EfI>bPHDUSvrYp?Bp! zhnzns+6?cp1)`>5F1-Yy682hhQ>80Hqm~)^rl2L^x^b098QKciurJn z|9P8oxDhe(xs|PN3SRis7!$XJ#g$^HKlyZ2Sz^+XLck6E&CTj`l6>ACjEcl-t`K_? 
z6=qj#r5@h`1*SlR<vS!;RYaAyp2(!oNJWk2r3y6{b>o2 zj!*;B!S*7E7op?JVi-w#ZlXKS)rI`7j zI(r>2A8;ih9%?DDrLhwFs-C@X z^e{LfWEQ5VJSlMVeY7{$OTQg~DHkm&wB=inl;x#4z>lzT0g-A=vucNg+ALO7?A($WD&M@gxDvm%`1#d(Y7 z6dwE-)-^2q%X%XzeFJyIFF~MLdY^5z$Xbo~Ag|t9S?{;9T5eMu&V_5T-|OG&;2+~5 zC`a3tzyEKn0;{Uhq!6i4d%-==atx>kPeV_PsR``D$!z)NiugkKsWh-tqepE;CBMBE zP1hDTqk`B+haoK0THNnS;e_g}uQ4v}iz1GRHx{dBv7)^C8CnStlmarAn@));5MHj* zeQDCYbK?}>NPrF95<65!;c0y1KbdML(I0%SB$h3gVQ54DFhG!^o_(Q^peW);_GMV{ z5wZ@Gvv~~Bh<%E{XBA$>7DiQBA9xAt*=Q&#I9z(mD?aWuTp^1XyRmPpPfdrsjIv2t zPO6_Hrk=iujbO3R*)aYL0LP(=9KZjL0ZzT^$|WNvLnW_3coM0-Aak()qHtX(^!b2l zmaTN>XCoU3pVfyL*|61l$gU#4Wto0%ejb85^=&Z9JDjI*X6S zmC+?2?t%h?oVW=SgZhQ%Dy}UyzGmuH)f5+|B;ey;h$S%A)nn`aVWgtcVS^k3!D7;wx_NBJpI+xBYXXGcC?Wqr8f=E~Jkosm|gWCg4)&Qs3J$hadjew@*}_ycDWX zbj5En1BGdOUAXu)pZ0rM*4un%i_^}p2^JpU;wNTQYS66LBpwG+3ZIitUt40&Z>>c3 zY3tsx_XjvwWsv+M0y#T_ujz2U4!MMaWFjNBop)lM_{0}NPu!<@a(}kMacmX2%0Q3j zRD(KIlB8V9p4^0yM2UvjQwYs|+X1Jls_LIPIFVd{x2(Kfi_vyo@8tAX9W<)6M?mpg zHf;P0p{*s<41LQJH$)|z$Xws)UawMCR#w#UVuShAEyg?sZb)@JdlSb`2|M%{7nID{(*5c-7+{VVpffp@Kw>@bH>z&_zV8>}FHt?Gl z297>?x12n;oKIKxZ>YPL?|VPeKiL~$0j+sijqNYlxgirtBe8Ax%fdnzcDWkN6TfM~ zPzWi!ddoR+*B%1DxQ};DZx8%}_yV4jMNP!Nbqx54#!_ruk4;gu_XHt~8Sx5Y#kmkC zeTKI-A3e6Af`j$J61?QcK zr8Lrn^>YE-7?P=6$)qyH_Qa9?wGs`IjaL%U#l#`EYb{b?eIvnwV3v5$yGicKuej4Q zIr2Qix7Q|-*R#0%*cmQ;E5z5u8h7VYZ2vY1M_I@4=^vBYJf0BPnz!X}k*m&_+wBhG zj~sD6PD03f=vt<{So(cIru5)S9W@C}y-bvCRB1Bt7_ozO=;3L*)vuwVTqmYDAYG@5 zNnQZveBR00q9%e#>GEP1*%XL!DZGVII9)k_^3J}esYcaHnmyoV+=-vvPPbbIJd@W% zGHFF676fGKm#A2bTQ0-}tcERfA@PSPcTN$3POnx_jUpy|1!QrfuR|iIpgnPHqi7G# z*N@(si9-1Zg|}=Ui}LfqS;ZMs$G2IKk{t>B3()CmdU_xj<{~bg7FWaYV^54LC_WT! z529FDcfb*E4ON7Qn@eQC&k*IP>nw_c>s;vs$J?&U%^H&d^LOJ!*zyhJJpV$Cv7s0k zTNiGFMUXsj&Ail^<|7U_Y4R2&hax&_>&JM9Z0Ub zhU7-h?xr3Rh}q)h04$&kF2^!tHyWhW^0I z^^>J|53~X4*Xi^VpIn5fg)5_CVNVM7!6C`1S_7$%2b=PUWf(O319aM1{C*oqdTzRn z*)J>~OWsyBoYPN<#YmezIQy_*SL9eP(;|X;S8Wm)BZs;4WaH$F;85~iGg5)xxg?!` z;odHi-A!GhWV3@(W*_u5gHM7hoTf5REp+O{&`B_Wu*#ZRwU^K*-NoeslWQH$5eVAR&z<_0my4s` z5Q~d{lGNPU&pQ9?ZZLk~WzAnP-VEK;nXFcWR4A8t-+;Lr8meEP!Vs}_;4=wb_xAacv*geTT!6E#(F*-!#Zx2S zR7q3h`gq}M&-<8CeHuSw+vWE2;|(yv2)tT6hsGTS44J5P;t&R;QwtM%T>ncqpm2oA7LuYi zk=vx_;Xz!(eqb#43rMPV-v2wu0v2jl)3I(WiMNO}eI+sr_TRcn2qqh`ZW21g3{HzIe0lQ1`K(4}_PiTocv1_APa9TpbGX){Hd z?pYp67cnCT0b|driIdUM(X~Bp`85W6&o+0@9uBqu-_9O~Bq$PT>TkX{ljjiw?SuEA$J>hO3Hmo4vai zGV!Oogj;r0S}9|9-v^o<2BC4ON$`R*Ii*@2SZSD5lWiLwY($sd-cX7%tc*|<+%uF zx6#gx7tCwv%ZTzPrkpf5#mHa%0w$V<_wSC(Fv|#-j|=#q*y<>j@1Iw z;xPE|nKh^FQ_Buuct=tB>R!sVv9IC&T9R#lh4r&=KWO2o3?6<|5k*ZncI{$lA~mOFM0tzFCP1}(BXO=9_)3Ec@qY%jiXvPe`SLugX@Qx!)_?pHX&A1hsi*Js-x~zp z>xqc?GC+*jE|Yt!n5mA*@$ZU`NEtiMfw1~d{QE7Z3N&Fh?5|~RD_95n(5Ap#_ZC$O z@*m+-D}S&5WN>vhQ=9%lG~(bxzg#-ofREi&7f8N4+Q#khvDOJ0_vz_uf^w;6*M0M? 
z|H(GkVIfR<|VuyUu#4*aoKmg&}Lsysfu<`eFgW)x^BrmFb1fZQw@ab5F zhsPnYL)TSBzu^+i<&}}tr@Y_&2cMV`vXdcW$r*t;SNl{Ih&-m|_WG>8$7HfGF^Qi` z1mvC;{P{DAFLs0DABd)UkhOKA>CtNlPmjS3-3B@a7D?LJ;j`}15`+6hEtyge#7Y$* zAEZWie&1m?Wmb9a**~;`qMoAR08UQ25qQ#%%us+hBb`^V_n?o@^_V-@d4frR8KR*c1DTYi6&>E znRwj!Vn9ZGwvf=)*Ji;QE}LRE%MU)8oF@^$u=m)u(j{zn_lSVW8fsX4GLO2-27JcS z+PK4?LE!X!_r?-DHf~0sR^M}3M?A|$!yTIY)T2$WSu&Xv%LuqvfWS>$Q8#^&)5J z#b0tJ)XP7OmbE*NAA0^OWa;;h72(A2*YLzHk2?Lr5-I5y1@?lp@BUf^$JASQ$e!!- z!c1_TNtD9n59a-aha2;4-VgwaxiD(?@X4*ZmTkt4gg5jsF5iE|J7j^!JjyYPx}0Ii zTy)Em2R&ZNkly-mAMN5oByY1BkA)yZ@B8>?vTtp&uGml!+YCp;Twbwg8IHj2R>a=F-RV78fz}vU$H8ef={btUBtOE3fN*=k zV%FYPJBv@?{qP)!zXBMb6n|i<#gQNW{HdmQuwVJO$`*q2dV1*nqX1?{+Bjm$71A-q z~jgThgKAWIGXBQ8K$}DnEQ6q{ejKV?e&nQrVzH>fzBE14$b33(fJ+ z>cZER)u=H|*No)CC$%tb4>*liY5fw}eo4uRm$oXmFq=PBZ$7NCOq_nhoKjzF6Nj;^xx+x9@KBN3G8@uS`4tGQtZAUTqEOF!)Sx#&) zc%K~^E8u5r5ZP@cf4|`n;>{jOY-a@vbZH3=MguuA>$p>Kv}k%J44?yZnE|M=gWNqt zF_SV6ikLIx=Tc=^Xb?(eR6!M|%PeKNeJruCV&Ar08*{(rLA2o&bTFk};vx5BNi>}YN`rWvNogMMF@hQO zDZ2OM8+<+RX*ry4R2+tN6$T%C=iw3sRL6vaDIS}G@3G4Qh9vS||C~2{_n({WJrF0< zA){TgkUJ%|VTLV~g3JPFbMJOqyTg_muq>E|WO+ybB*7l{ICp;5!|kSkwQLp#xDuLa z{=~6}>MUI5m0S0TAZ$6x`eR}EX-sT7g0ByZ0>41ohvj3q#rdB=r7tCTq16HU_wR3J z=EHf}u7H{60@NG`DAV^sD=Xy^1tYB!;o=>FiGAjWiMxW?5~R@oTV!qn`Ce|LsHzIE z(U2a;$Lip@@eLwIwagq5GE|VQhvBjO`TR&x=>EDAy$r&7aHB1TR5{=tF$vNH+R+2q`o!i3F)&nNaanCSh3Uzx4q8 z$R_*jmgOmqNj;cdmZR~EML^V53nEM{^57vDDd=Yy`s4}krPqdzYaOvfGvf=-Mz$UrfHFM1@S6%*W?IY*RG7o zQXO`7SE+oVo?42TkCI{TC&StKDUE=X=pBOw?x%_K+T~}jgDAyTYqg)*iO6DN@|kz@ zg|{i|bzx4Ik})2SG_xvO0?IJ~CYC8u=EYJklU>8|tSyA+CXR657JfM+z{h2Ep({gm zXPhN2QoiO}??d8A08a!%JO(SkWHs|{cOBsCAX${B4%>-C4M9`qKm&$J&$bwCW{bc0 z=kXi)-v;=q)R&dlY8?k}WVhcisBT;K*L!=2221h{58BVi07rQeP-`S|SCh%pC!d2n z-UAj@X=+$ob{n54uARptVnX0XYMYaNTa61XG@D5_Uc#?|AAXN|kz3F)yce7fY{{)g z1vM8+Qu{AOOQNRS&eLRMTifa- z0CVvk5w!&Hw?N-+Y`-<#waTNH)JpC&xu&Uj%153#d9b%Q6C4fw42}(IQlC53&d=S_ zkTHJZvRxXt6@s?`%CYlLDk59zH1t&T=4ML?qU$KmI29tZn7S6GC*KX@QMnYLV(#yQ zU=-%#F>GODpiL);I;rerml6&B$WxKyYFe~2`DqfR7cjtuA8vud)7sJ;3tc->+KG2|>rA0yURppA_nm+aqg{7d|Y z4dfdMHt~@CCAtz9FmxdOqc*5+MZ&!jqXmEB!NdmB(B!J%lFfcWFJ}dL=uKaCY8*Yt zC9(c4W2EzYH}u4elX?12x&)u2T*oKFa460DM0IG2c3lIszO#KLsySu~-b<1II%V28 zSXEv`JNa1Zt?^Nx3!zj4Y_F}Xv7W(#qii}01d=qzsgGraHg_u=T$dZSRy30+t! 
zQtTseSIAkmxi>iRe5ktN4Uaifw6Ho?rhw=a4T-F&U!t})CKebAB1QJ%c>Ovg(RmbM z_O;qKCI%qyCp-g;yvJ)Wo#;-rbU@uZ%2tPaJy)y!1J4Fq{S-9z@PXYef zvNYPZWX3K{1??RiI9;|x7ViSuBIe{_I<04I3aMRa;cy)+hFRHGP|lD~Av9s?A5c@< ztr4;XC#k?ezq=t>}*{s+Isp!=ST2;+LhKOezrKaBlDz%BqPp~KE z7(!*`@-?h75}U$vX>-w`j_*$mynIFo(o`#kPJ zn78Yx#M!({fV{e4X#}Kg4~en(tSjU1#^DExtwAA%zRG!A#B#~AP?3k^VTLQ&B8KAe z$6ji(kmi@IRx>+%)6>fi5N&SqEd8$0@PIRx4C^c3s`%vHQ8Y!Ubbi+XWa#MXFgOH{ z^#0d*KMRy`^`Gqurc=z1OOWLHfAm>o`|UHjCxZYG7uTH;O8-z~S_R1>%WM3_0|#$< z-=JmVV4e;m*ubF<;We=8Y)Aj-H2mcBwBIsGG}6$e3y++9=}Jp#pJ8C`KiIU2a~o9R zbskj#^RN(Bc&Mu1tS_~HIKIcv(+fNwwyZn9-0=1N`T~A-6818yc+^M5@D59QnclEz zf7jd()v+~78XVaXV27A)YRBqj;?JqZ_Sgl7zx;cWqa*N);gc$)s#tvK2Z8Qq2!rAk z5c^%|=`AGP5nFKi_tMipDJFy9V{;qz{FEg*>f-z=wRzr($xVjU@$}2 zPU=?=Z)!JdB;T@RC!wP}Bv9adt=fSg{Fq)*s=#)Vs(=C?lHxsslzzNr_{xBB*Sr9} zI3~9nsD$(Hl-Qgy)rDUMA%Asd(r@kVFM5?4bg|)vrE^YA#kVUH6r6~1Ka+`4DKi>W zf6tEAY4|4`JD=&1(U(>HAYQpHaibs$z(y3J+m&pnG*Y#YZn8d0f5Yn|lIL4AN05HD z^tWixL>CY6;h7>PDs4t$uo|vLd$*c^(-|#==^X4PG;3n$D5fO-1XD$SbTzY z>$Wyv`+ZC>zt9U59yZ;*K z)BQEr$!zTP$syrt)WA>n0)jG$B=*6HW9unaSn`l*jMzd48o5>eWgw)auI%&l{g4#gOH-0ibsL_j8p!wCuZq$s&;D zSc^qE$HSBHq*;m-Oh!SXIC2hMnx>N4$K}56XiOsC(7ro6drVk5n;ADCuE!Td)ynrZ zI&B(NFi9q~pJZ{;mm=m@C~1_H&E46s!&x|H!~GIGVEx%2?7AK%oi{#p{tUoRAd-QH z7hsE7bxn0mg$A5KLZE+pZMK+OUxKTGxL{yLzC>PRqMFpuuHkZ$Wa8FFbC1luz3pE- z9d|0fp55iU;z*9a^lWGp5C2lH`Y27HtF%2=XL{+rC6q{Cos>aCynB#N)iK|OA2!|C z*w8r74*9QMdcgdUcPbcL;WPJ4GeU-)7FFKiRwp{qOx3uqr0L2%e8WY+nGcI^9yD$H z7K@vu`5vm*8n(`r!c=RwC8iFxE>!_J7LmNWOK%$(FKeeDPP!ib=`XUmbv^H zw%#Gt956`KR`Qnoh2l|3^#-d~D^3D9#@S)mat)>*4JwO$X|gyuG|(p&*fC{fj_HE*?}*KV?Sb zaIqxCU|?lOuV^x~4GAG-sSkCSt11%8x8K5#@OVKj*V$-!@{)xi4#kUV*L)q-x+1Y% zw*bR}f8hx9&k(-FctbktO~sW&Nex8&`L9#GSkW%o9S=0ccUHEj$bXGzqWYXQ zTiT{&AI{E)&$k0hMKy1GGKCg zGs-}-oVepH9aSh1CQtR3Q!=Njz=-|PO}63l#Q7(G7XTE4`>>{d1}wKjMSf5Pw9#awM(_?Xs!9yB#gM6(&`QvmHe+w*yjJj%sY=1WPS4)J-^kM-{zF}`#b}( z2)|oIX3`wms)6X|3!d6Aeba(?`kPD!4BCSrOVmdiuRlMdSKd*L_QN-BJ&no*>uSU~ zdZlFoLPOZ#JDytG&c&tf~Ci8{;Rr zAVW3d9w&t#2!dfSmG9n?9 z3&mm!p;+ago?gnLboSCcCf{4(pq6`lC#cz7VWQtzn{N62#RY?9%TC!CGG>_(*!Tw_ z>@ApIa23aM`h~zBwLd@5;O~vbL@cZ^zY)O}<;cJJjVUH~Q~Bu)smkY|bJyo&V^_di zn+4hazx2QEcs5WdjJ_UDDW~RiL^jVXekNDh+=%Z5k_>;6wGOikEHey&k7=T++KAq8 zWAHlde;Pb!D!5fkoH0_9iub}j255XealA$B~>!9{AkzzPcF4&cR@ebwBl&ou&S2W<=D~- zq8}ucXcRYrSzxCO4cOom;zXoyV$yjO|9Z(G;(5c+>HQ$k$*~zeb{3R$fT0%oSWXbE zkx8`rpRQKf6i1zku3&>Z4Ttl#C&(dZxel3pb!r?rBY|6g;XenfZ*r#Q-!s$T)_d_k zQ8kxeT=o3|5GY%CXmdYSZb9RaNdxk?2~vfh;$iFGIH*QRy=`dbKzwPVO&>W|DnDV@ z;%Nw9sX33@T?YxKC1Q#V`lg4cRSjvg0kY?!f>B9z*uvPMDpe_kbym#mCca4diA1X}DNjB6PQUOnk5gHVERJOCt@j%2+i63wo%s zD{xJyYS~C8pqCU8jgT~ga;(dbJ85aZ6af9G8V&-(+A5IDce5zz+`Td+e0rWxuB%Az zq}=whbZrWj*@s z|24X{p~|$Gc;M3{Q!=gxE!?hoh`@pwI($i_bFK2B$z#*nCj!34`Ua$p;@7R@x@qj3 ztr&J)`j$@TW-}4^zK>xLdJTpS6zU%JP`ltv z?)a`n?My-b0iIE_p%R;=}JJQr>!*<5qrSb`(wSZlQi6%Y?{%27LoS88&7A9}rzMS#s_F76S`2{WhUA zRQ|G|>x&K&Hs1A`ynx6Yev(lV_nI8eAcyu-PEqqO;hKG~TtLZmL5*gbo^qWOJFuY^ zW1eXF`4u4O&rT)OAe?XYzDP3%Nuf(k@A9gSJ&gkLgRF{D=BY~pBtcIHoP`F^!e`IU ztR+$%QcyK0=h<-)VcW}w7&4(&d9eg#0E> zll#PBV5u&N05f9TNO`=Ee`}CvjuImnxlcRUAkwXJ-c(m*uz+QA{2a4{tmG~+#9A!od9NXlcpH`l*|^}zq?qxWUN`f zPBnc7#7NMoeLz7#wp`fCvq_|>^v!eNd(r;*`B*u!<|%_Plcdw8Ii@S|IHPZxpM~Ot z{Orthvtq^GMoE_WTz(f6abpz=zX#64XQ}wG3*`Oec)X9tN}DM^2Z`NA9nT z4i&iG@jGD~4}~7B>kdaiddME~*|su_i4RikavB;M8ixxei@^H(uNFBuIk@VotFU+Q%m3wN?+}@)^I7Z0=KO!zr36|NO^U@wh3MBOd=NM(FBtGmpB~hVB z8Q+`4gM^<#FBFnurj7mDihRp^lZY@AuZV&W5t4Cc*;x|KjrY_?47FyS4~YOt0)^L@ z$!7U2%jNmGNs36&RmttKOAdYt8XFriZrnI*-n<#!-a?p;AV{p&=G9B%cf(Rc8T=PI?Cqv~ zAERh)Zbp23JW5JR;N!#U9u1A7i{7f;aP^TjV9C8=kFvL>oM2SKa9?8(P1YaF`LDyo 
zsg|Dx??~g%e3-T8{hWobpBO_xFro$!VeL z^-b;FXzA!ykGpr)B4g*%2wHw80#@9El&ojq8DdwO&GJ{y%B8bUa!sO8Cq$K-G@WIV zTsqoR4Cl;UW}}&fW5k%5vp^v#>`$dr&-)0H5YF!9xGxfKepnu-KhYze2YX35ifilJ zLax^UE1!_}{@QpA!30XFNz{;Y3^GHq z&mJ}kHSPM`D9a`3UVJC8;=jP^r-5(8aUp4-(3{2)0b3nb?5rR?Pq`MEl; zR$a>*3XClF_@oewj0;q)53QZO^0N|KOBz-Ac(|Wg^0(k$>7YO zGvrAoxpsu~Gk_B-B%1*zs+62HNvHErBDG2EF`KnkXR^nVR2@MU1V%jEE{Lm zj^ujO6-N0dGhKAnnGvJoPEx__GaYMY)ofy96l|i%7#(B<`6B2TkbF2QlCwOV89QdV znRROt0$|^-Y6JUbsT9uVk#N@+8G!+k0+Mj{*4l%_hGd2rHmZm-0KlvtvvC}yv{T3r zOKwRBc-|_DEd;(`r7@z(RxYe@SV6+fiVkL=ZD3}b0V$SxvkGKkW~I^;aCLFns}jqs zM6TEg$9R!Evtoo*9#oUPW@-{H92+Nu+Y2)W$u%N)b4?LG-CBkZw^X7w<^*Geo1?%V zv(mN}1bZ;s%RXj~wo)VI#JMHGWF-ZAtQmx2P>2MUjvlW$ite$Tngo==DVB3GkhDb{ z;dRd$!K|hdqZFN_mo`Hp&ttRKlJ7~|uxTaOgF!6@xJZim9DDG6c&==#U;u6V2S#NH zGw2LDQQXiGEFI^0)2V&r$A{p#EJ$H1hkcVVs%Q2|7Op;*(mB!h>N`WZ;POy)AW{MX=a^5uvvkiShQZ~O1 z6U$BKJY{6mIHHH;qpCDq9_RdBX01qISzcPYK^)yLjcjn{$3|bZpAc<2?Ih?d0cV*k zooYUx*nGkI&U%&cxFW?u4rbhIh(56ll|+VFJ&K@F zd}+m_;%Kaua}shw=et49n_sSq;5&fl!|YmZj*&Io5Aql{WzH2P;bw+0TW+eCO&-%E z`-p%+sVcTY*j>*A?TGhWJtY};oHj@Wo2W+X(~oz?zPkV=9O#XBxm7lG;jOjBph7=e zD+rFkv-N%FH_E?lf8>=}?ey8kwPquT)E1!V7)A9cD9WW5U0KrcAoD2?d z{x;vw(oZ&-=Nmi5AoFUCk$46`23H+25XjFL*p0+TUq`l#QLQb`B;`yqXOmkewL+zfLxyr21{EVOk8V}u%g(> zX^#P?{73f7dyKt$ve=9#)&8w7a5l&u-QCrJE)(ZuTkkv6V1*Kcf<>}c+kx$Mvxh{f z@Avgpg-Z8BLqkL3P(zdK!wgqh%p?3f)rc-f!~}Zn?a3xMV(5=EGjQ>^cvbSsabA~C zO2n+ekvKj*94{`<$5%Tlu`0J#MZLEQd9P~f?3YhgH+QO@ZO#`a5urdDOR`-0%L+5g zD}=erLmYLMev&BDx#su_k_%>y=xB2U5jS*}(~=x8dq+adY!@AOiXoZA(WbLaQo7+~ zqqmwwC1Hw?5Fyb7&zIw}idGupXeZwXocF3h?nzF{wi=E+d$a3>h_Hm4O2{mIJ^cw_ z&eOnHimRnsk0%G@jmKokE=$`LIktl5!_78P&Vd=i72=G9q#J$RHVhRqvPzC^>$4RH zMiCQ|p=NRZS*ogW*kKle>>!@M7gWNhGc+XHUnzvQb@qsD7@28%%orH1vBU^fA(O&imKqOzM!pg{yS}D@!bYtXXcrG(R|&Ia}5IBDR#p5 z+p}1Xn_}1pK|Ka4{!_nz8E$@lRdpiI<=-T@>iy(3N=@AkECpv4oi@rK9Emm|Yl*3H zJNjgOvDl5%QQ*4668dN5yLo0J@bow%v#-XXS-}>L+(kyk3WIgb?(_HUAIbO4acDaD zrE%(J6&RoGP2paS8x`zo3`uxLkx8nbyYn5dfox&FDX(7r3PpvL)?ZZh^eR+~}m zu)n~ZGeB&&tGyjvF6P5iE>5s2hu_7XCC1ue2l_=EbIQ5dYx88?($LV*IK)IiI+pb;p1VNiMYs7;JI`i8e_yGvch!P^4Dg zpZ9S@7=uV8;#`j3J`ea%uFJQC5YM{-7=0NKCCAPbQpsLRX2BV_;`_2ag7d05i@rpN z?VA5tuTgO()%m?7b}7eYSv$|4=e0wmEvx! 
zBB6ayERbS_B(-7bM)|nPy36;(b%N&=Ib7BW`TP_cw2^GFLD$+Gm_NN1Ciafos%U7> zwB@B@n_OFXTn6*z%NoII0RGK?Fn)I}mBwrAQIl;1#B_hQ#xVpE=!Ui~?2!GQ`#!VS z43@Wb9k{RLRJNd^vEw_ktg?o152HT>up*#xJC(Enf%)Byt`{grR_-umLbReu^98XAWRdW4tYaF6g}R*hr0>f5^2jK;KZ zKU`tWP;>6@+|8wmbic8tNQicw5@iba^TZ1)^6~AiYNQIm_+op7QpL0fx86y~A!zCB zL7g0%z1}1wi6K5Xe@u*!>^|%$Yf`1?rw)xmYgaF#ViFK)YJ#h!OUS+pdi>*18#fbQ zx=cfLLlcJg?La_Z156~R>`Si8HA=o&qD>;fUT8Ye_8@8J*da#EDKe(!{L040EH@vs z^=s1+XC{w=W$b+>5hq!oL(Sv!J(+D|4=No8mfEr(lwbxeUdS)Ybydch=OOP!@<5_M z63uKOi4l)S>r6T%gVP3xn@auw>b0cE9W=J%sFrDC#AR(Nduh}zLve_Bpv*0 z%wCe{q{{I~Zfp^gUnq{&2rpj0v=xrvVlQ@NfTvOlr%+8!MGJzwJ@DGQM?~YbRfTw2$ak4Ir_5Ba{L&t*?1&BWQpe}WFWL<4sNNKSK@(@8smBc# z`S*1lFiUQy8U&lyKOHp3l5$?#3oi^rC>MAOeD26af({7W@=Nq}j2f(vgnzUZf`wAoLVcNGHABZ~pV{3pbYy z5ZEM}%{>0DWN&%PyywiE=d@A(&C%zc?Nm@(FHZ;6%j*JfZdeA$TXADFZ5d46K} z%Y84WOf-#1&2b!lPsVlHzpJrPzpi25xNh6Fvv##9>aiaVb*Z$ck1IjLMk)jsP$e(Mn*K6_i)IS^?kGto_tEL80 z-8-~Rjh;ly5qD{{t-JOM6LafQB-wkzl z{Iuv!s&SFj(V>?W3pql;U|*!cN?aJFwd$6vHrwJZs*RwlUA0H$R>y>}b$lTjjBN({ z-DegNH7TmweK~|(Bf-#QP3kQ&Bb~N->m<>|)gsxEmRpndl~TGbq8gmscV0+c!SDT5 z5rLL2_Rt>+VgVv-j1lBX)KX;8_CrX+{qeUC{=T?hQ9+Ax**HdIyx*XrPFvb)#!>?U z{K;+C2){mj%W%%e2Zt~1x=u=^Y}ckfOq(+=^%_u9C%V1o#(mRXr_w4#to1Me5pO1& zV^w3ythr&#v{_+~jr)Y%H|!Jkh&pk|R;z{e`gIS#8rm-Wc*aMItKmIll~YP)NeTBeZb&?De1CY5)(?51AhcvumfX^iUHf4uGA$UO3$}76i_@vSFF} z^v0t8AakXR-kEbaC#nqNRM##mz+7Zr$jDd$Fmlq&RJtN+`s$R~Y2E+boi8N&V9@GO z$E(n*Q`_W(U!!M-w-9WYH#_RlBY%x+YVW9DPsfATY=ucj|M$m+Kbj{rP=9SsB-c@e zJ1ja)H!U~}SB>*Hr+iI-z}ODx*(nSj&?8(wbW9jBYHF^}&dA7UGGvmyjDS$v*#~Jdt$@XNCXh%Ht{zmeCcGnMKaL7^`Zvl`hB{DwR&|* zDYLwz-od3jzq zsQ;AEdFX|Ofm_0&gj8xq#?WVsBcswwQ@vLt6jDyraf;q)vZ6z`ZHDgpd0~_z9t`vm z(Fm|D7V@myv4%_xbCEBRC@+yS5#p%~`)Gt~*z^YC`$&3YsiV$~L?G`-yhtC?FCwRz zQcZe%g3jU$e)olBJ2%2o0y)tv#W~aVUh%l1eeS{+=p+q#UB$nyMNb>=VLMmlrj{BCg}3 zv1{8cjw#V$GT)|C;k5DJ#=uD=>dQE^C7lLgr4h*SgBYuU z<`B}z3@O!2kWM1++F?~fBwH7LpV1#_;&k4YUG$CaRKsAL9nu>Ta%Z~Yi-CnVxt(geQ|l?US>^Q5sTv}-T7_fzs=kL?{#DDF)rfcc2iZjt#R zBcu7kI)2x46T*l%&s1aj?D;8M?T{^33t!uPy|DMDeN$6Yy;(TOt9!OoqxHf$p-vXe zT2Y6rO?@^h<{B9#)+u_GGEy3T&(f9o7Um4_r8?+*<@s7WHbdDv2Ze$6{MRCFsNT5?@0Q89)C4$`=MTyr(9~dExsO)$M4mtT|EB0aNcdthsz%w z9jJsl>Zm=icRsOb(xhoqicm#k zU59TGH;tAMUeUXAHv$AYLvdbyOIYhN@{Xr3NF1sp=qDxPI!;p@|Lh3e> zTMOTpvde%Ud}K7tBe6hu9LswoBJv=h?Ca_K388;CP+ ziG3dryDoe%bnX+Mvvg?9@;H*zAS2LHok33EmGNAb%^@-twL8d zKO(p4y%AD6Qd$3Sl@ih8Wzl%8K#6Tp>u1&frdAi7VOwy z7YB~hvIMOPoV9yxvTAa+{`QU+;&k4~gUZOr$XG(;n(TS&6il zgk6)gLk6!FUY|WbOpJeD^TexR*w|_5bIS+Py9tKHFnwFk2pyh{M7kD;sm1)! zzZKD43zP;*qE_2~B^vYb@3BKd=pfP^2}X64R|MI@ri-y?Uum>641C6zx1|(e+5(GaPWp3AYI`*GSjES}3k z?wBngg>`w=WS3B%ICRvM@Qurcq#Emq(SY5!f45YRecaA#g%>Bx43|GL`i(vXWS?_N zHV1jO;n(0u#>Y&Vo#cPx{$0ai8?_7DJ$pvzGW?3rX4;6*Zu+RuZp7W8`zrmyMuT?@ zdvzNT)_DDiP_JXJ`h88rY*i=HD>@|WLX*AJAnyZF<{~M6?#4GsDjDP0;Z$1p!$s3B zTiHGS&m!xC!7L(97koypIES{0GXB!NQPz(tBFSVA0Y*UEM|m4|O(e{bY?E!Z$P8#T z+W!66-{ZBd9u4cL125iIY_+BS+BqCwNdywlIUvQG%z|qf5XrwT;W9SLT?c&6HeBRd zq`K^aj?iuz3ANup^ru1x>5E7>BCj6Y^{lmX{Qb6t&T=L=zR0?J^qvSdIKM0s-_^WL zS;jU|Cpjh5c2|=NQ8~J;rrT$9tg#6AQbw-H&ZtJYexG#d6KP($Vd$8YeKW9Er?=Gg z>Wd;f=h`7-Qf+oMKD6N~;lM3c3;IpSN-K8*(LbUw)WtQd?(Sbik{#y~o&K%Ob``J& z(fe^j3+LS6!$YOpz);7Typ-^MYO-fd7#Xg4Y-H%syH8lZ*HT_%%okpQ25g${0~eV? 
zjC8jSZPGKi=%Eqm8En4#(yL|9$jDeZuGBw~E>UK@lXwp$}~iR8Ly zf)c_Rhin$k|KygbDcU0=CPmWFBi#G^gm7Y9%+I*t*>KVSMkGS6d-Bh&e$lI?0#)Z0ar!q zeeXT1FiLzTbsUMh;hQ1s_WgL>kud8rZaX2Q-2Ecgz2m(*F5;zb`;YP7bspDw`XBMy zMm!Kduc;Z&SRxQp4b2Yu?S0|lmnSEu(Ou8ImTDO7d)=jd>u}!-6C16avlsT(QC}uR zQhw@XL(?Ah`)%GYY#WL9R^4ZWw$E3S;`sac+}qvxf1&@JmqOnz?L(cN1y%B7%YL~G8W_?o%xC}Inwrt~hz~=o^3E8^=C*qu0vm()s z>(70^iMlwcAlYx|eM4`A2Y(kTQFrS*G-Qr6LoA5bIia2d z^TK71j!vCNZ+?1QN|ilgyEVh`xOk7BJ|}$sgKLKq_gp{hwB9P=>z6zcZhC55cy`?M z@UKX^eSiCPdW9|>3XkxIHdrOSk3_eNi@Lg_YDh!t(yl&?96vE^(5pU79XB#8*yWT^ z5uwxhn<2i#@zJo~sz^cIB9cf3wP>{lVCA0H>b^wqY~RHguLk0c_E*1y2A zp52Uegf^wm#$&3(6vakL)U7`8ucGc#cW{Jx@`!>^ND)VVYl*&V6FFzlTUcLxw1|lF z`o9*hy%r1<2Fw0u7yBctrFFl%eYYU;&VA1>iyBn5=g7Gzo2dKvXF~Yj?~7<=#(T!} zs0U}?JUnG@i2iJ`M)$N|eE)9k!(m&m9!5=>71mj$hOjk&qiK93*!M>gzE=E>-zuZ{ z%Y83}1(V`4Zwzx^53MWnqNGOIk&U1^Sv6#Ei0JA}%Sik1k3u+Dr*8YvMj|a`y7vaf ziv!1YSLBehg;Pl5KWXP8+!~xA$+9cRH1g@YxBfU1-rJ)L4U01LhY+qiG3wicMF~T% zsE?N%Swz-3jYO}dq>CaW%XGAK2-L#+`Xu=6t?|0IKCbAgfIN#dJ2#y~qFoaro=P&} z;M1vt&X2_ZFU9@Zhqp1fZVm+3t|6QBaTz2xoKiT)%IJAUMhk&;{FiZ#ss%^+e}|20 zl$>?z*d}~y-;Kgwj@~*P@qsnMS9f1GTyWIZY0JUtEd>{gc+gh!;#@T+>bVS%Bd>~U z*mDgZ)(IQ-fxrlg81fL!(R*kdT>PQAkM?xDiUDzSRLBbhY8c> zq&>8^TdQZd^08OLVX_BK<(2cG^(Xm!hJp-x(?2$zT#;~gOoA=Yu+UcNe+YG|aU zR5Y0Z{pdd;`B*cAb=6!pkRM)Sktq#FkYQwkVK3sf&K}XgA6F3hx1?6bQIYzso>grb zf-hn&B^zN7QJ1cZ&_8xz9Kd*eqIJkQf~3Z@8pySXIuAcsTt(s-AZ6WWEs%AQK~Z&t zTurS`&QkIa>w6*r7fG~WuN&_L8DDdkV!UQVf{kQkyl3bVPCuBPwz1hN%I-Bo#)NBz zj!9c|{pH@5Q?0eAb~S8+x6fw%QaAliY&#(Azj?p#@`Ra5pEg^qTbSOqcM+3eu zO42L+3_kST!b!7bly_<0_RJ<{$h5u$*Bl>zA9ap=A?ivg)MwKjr4G+rQy7aTmqoh% zHz1`}w=T}7UQ_xy8I{37mv;Pmk=m|an&V8p=d;%qUG_C96v3xcdMzx@+(goi8^=@i zcU+fwf^#8fcVx66$OKzlgRC)AW~EHB#-RD>_UrTx-`#Jc@Wl_Vljb*$#CJv|R9+tw z>W5w$THpQiqC8{HlsI?AIjuVTtb{eRvAM1pVfKuvVOA^KHP+eyOA(y+t8P?e6PcG9 zfrz%Inw_I9#a3fSWn^SzyccAWy$qom+Wx)L#A_j~Rm%9*1ABxG`*uxHys5M1hasb; zB*OjJh$(3ckan$Fg_&~}yv0*4b=FIF(xbK?n3|p0V;E`o{Q;4XsQb2`@Q!PD4`17O zM%Z=q??ad8t_ z@pX8S#`^MIMe40+u)0iS*H&2;IRpT4K;l9DCKB#hgCJyZzRn}sNqD0MchHO(ec}LE68yzR^6nCc#B5+t%w2V!mNhc)P#7i zqL_{$Wf*~|kE(?Dq`kl!Av+oG8k|B(Bp5q8t=lX7_o;D7_U&)2NzkO}b5m#jT{l=I z$@>S^>Y28!)3|2Zta-`Vq?^9p23T6z5fMgqr4V}NoJ8b8 zWOl>-i&|{*CW39xbzQEJWIBNa>k=-*1CiGG+i@7UNVR%v4jK+8=On`nJ52=XnJ6#) zks_^&KbfjE{5X%COWm@_v+Luaq8GN`l4+rw!gX+JRj01|u8a|Sjo`=eOeyb={jKrY#F)yyRs z87mmn?3#OOLB(vTDKqWuU~m6-yC1XK@5;y*Z?5!#iHjSe&7Gswuu z$XHS=!`UsPK(QFE+O=IuOFC@p0jaqeLSbRvDw1qz->YUX7Ig9p6Vd9HV2~`gDA0;&qW|_w3j$d86nn#s;^&;^) z8Z+{BKIah7K+3rg635%}sCeJcMFOu$Q@mdHsOCJ?vPKeLc7S&Ony9HOPYtqeF78!o zI^rgEcQNT`8E=pFZCZy4+%!*pP;j$y}jd#5HfB0#$Nid?V1 zYUi*;)FZW#kGwKDbqhzzrEfkxc3LW%_~+2+;ZLm(3uCr9HO$%K=n%I1QV4teu#i{J zSmP~GVkDZQ1p(ESe)rP~hmvfDTYgY9`qT6)T~s8k2sOuw`e}R9>NqZ9qNDhrPZXqG z3i`v*sFQUe^_+1>eq>_UmmP^kY>{!sF@jzyx#OjVgHfkSLSIIP%WMkdS2Uf&KzYUb zd&m1Zt{}O({)6u^%0;d9TTuF8OB+2B$gT8sDe+r>CjR!bMJl{Z69iekIET_!aom)9 zAoKpVLxAxU0q0CW#QmE_D!zv?V#e;SAu&q{4`|OvQ-qvr# zY_|>Ln)*H=18x4CSz-RPaiM_nVx*6vY^MmX#>x_~tdaennd5GTcd~&F4 zc37xW0@94}%ETEVPTiqfyBcF=X~TMN4wN=7D^HWhMj%sayS00Tn}&@I54}7o4s~`8 zW@Kb68FF{=h-KGZye_;xXI_{+cR?Zwwip^QX=d1MQ4V6Iy@EHzZ#8p_0N4aNn3)vg%qYV=$(pG?OLMEc-GWNcT>@75igyQ zrB~041XC2eI%36$7s*0arGNX3fxsX~3PgcL07cJqC}!L<+FgShX|=7J#1|qUy6AJ{ zSr>8!2r@1`?&)aE+j0p3SYwCcdx&}eEsDq_f{x(#iR1)9wzsGVxGkDoJ3^y|H4!cM zabfWrb?WL_ZMo!SVYW{(;(`nz=7^6mL@IQrMl{s7N>^XOuspkq8%~XYY+J{$(ZP4) zeqFhfRAf z481$I4Nr}q6K2n=g!TLN3~Q{tQE13^wd$sOUZ%^ zg-Vn$4ZX;!vH@x1nv>sHKV>r@s?uVx?`08rwaYl6&y+SUy*Ca5yK=)T3D?|$K>>W z*NB9A-rP7p&0mnB9`&!k8Y;tY3iVH29_nXL4Q)C_ed5%!&eCCxKVKUb(w2T4*!>5^ z_v1QhPVwp!ztg^3sOz}&N%s5cE`GzE_k`C%kMO}Y+NC*vnW9}>o0Yg3S58QCq24i7 
z&@`iw6`0(N3GoSfPxaF2xLi_eL&AXZyI(lF6 z=%{e!O~VsuHvG2t*tlPpwD2%O(V(PAui&7y!*w(O$QC= z9zM6r+TpgR$0d?(t0GbCiys~tR_)S0?6L8x={IxYxh}Zpnd4%Kfns+`-X+e<_j(kuw)R0(0r3#UtB>UUs(5Vauh4Oo5Dq-ANHIsw850_^A+RFY z2*W*R6~{)%)u@WDNm11$TK92V6ltQ?Yn}K2A_;Qt_L0yJi3A-G4BIB%_49%t>*T!c z=L_;xrotoZh_=YRsP%@CT?LXU9RDoLkxou#G5>n3fV?L+<)$$lg;>gSLRk$QEt zG6)cIgkCB;QsdrqkG4(Oa>mrDQ^U5~ZW|tZ?6J_X&cQgcS*Sb|WcE>dn!G3n!>#5s*`!@CAyj!0Sf7_>RSo{8Oht?A#S&Wz;jm}X! zpAq?JkI?(Eb3?n~*Mz#Qjt+IkT+J{i4v-<_({qu&jSvGJKvG4Lw>~C>PVw1!_Fjmu zC@#4|xJ8-CG8sj-8IPP;h`TyUQQ$Q{7-b|rYvfvXg|Z1UX|MLHzEluozhh97H@bMm zafQ=Jc7~`m2bA+Hj3k{COt*7-sWUzqaxbzy>`_fuka!L(4GQ%oaNmhU$8$LkV{_Qm zZ{D*DK!l~u55F<$)Hg%8^0USJbiuhzMj!coaZj(@6T-vs8aTeZ?{hlDQJ|5V=$p&{ zB;NKy2SvSX^`=3)BH;DcUk_`nu|^m*YE zg#~f_Z{Ia696K!354$4FZPz=r8hm(Y^~AqIojv+F$z+~zlBKMdsOub6Gjzs|bCl8! z*(h|Kvs%{H#OFdK&Q}XIKRhl;oQ6vm)2B}ly?XUZfr`$Zm#j?cYd`sAnAK@OIA)tT zH!LqG$Kh;OE@#~y6m1gZJI!weMopfX`ha{m+EriPZQaoJU8ebGWMpJC`$+bJ1q;GG z_uLasIN^lw-S2)kTy)VzVfEEl4{NWzc3jXFUC~6}L>j*M#V>~Q&O0x>^wLXVqm4ES zUAuOD%Xh1>EceNl&ie0%My7~cTy_(zzcc1821y4==BsxLuT7tmYE0F29}`K2^jWp1nm&DE+cgrIzBnHL?%}V6PVL)-?j75N=f_V^ z$DcfNZg_melyKAzYlVqZUk__mMy6D>RCB!h_X4ns0wMxZvyt(;e^qoUM{E$>lve&sv?HRy z&lnl~a6vARdJ!_wW&cLz8T=bZBA|X7q<}j<14VS%Ufh~hNv%W1MbZ#s_q^H1;&t9x z+}CrF0O^Q~V2OlJe6b)*B|IbslIWx8Cx?J*v_%urB0}%}q@RmKB2A*)W&Jn<$v7cn zNEOG0--_Pr65UFJDx_&%@jlY;e9naoN=J6AWrXD!$<|xo=3!&QDVGmTGJDG28>AY! zTb~)9YUF70(L1aeUY|WTyfSt|_{652!fyTMghB1bgyXhdCG@)GgwSgI&`=l0-K*8~ zF`@g^VWHcqYlb$_@T}Y6tDz3LMSMk{OL$J?zgHy7BGRy4e1-_N27a6@o+CMZCdxj! zk#gRtp@e{wN|aZ+^{qb);rW~6oAGL;P|MChYNeO!7LGV8l^~b4@zOMm6H7URn9HQ-PEr;sIb z8V&NWY1Dyk{fj7$>+>>K&JcNyawi2{Ifr9uoCb8nSotsgqK6{&_NPyu#dG_w$i3s4 zKa8LI5(yXKUN^p9I+K)nxW_S%7dk8J#XU%4Pfxpf;5|jMje~JBmrNb)jX1w`DCVst zf#WE};neiZ$*DO;`7M19JAtloMjV+hdOJHXQ^>!T<0pY;(s}>W={!q zGF~P;Tl}}&_Awr-k}|mB@1ri)y&nH7BZXs2O>jhA)cN|(tEN4#TX*gq>O-8fWU5#P z=n8#)@VCYN-t~(R?)+I1z2^9$!<@%t%Q@(zwWaT%(5mky@!(6J48EU8_U~>P8rJOA zIyq368?57;@0w0p&r?c%9CwazdK$-2z@ts8x|AuUU(UduotlTrWn^SDdo+8JjVPaa z=9yuG4K@fr{pn8=VLtWLQ^WrI@1H!V)wuG?E5li5ot0i6``E|A)mL8~rcIj`w%l^d zw+r!d7Y0dT;(-5i#d%csUY&kqe|m zH+O{U`GzQ^vC+iJ!oLi#JV`d$mC-SF+MGnL_t~s(axUF9JR0s%wtIDI8x9)WKXjZo zIec|+>#*AVk)hS&{|I%r|0L8!GF$iH?;4KuCh8cW+& z@2H;~I~sNAv`romKehiaL_$e+5MLPyN+m>-kpOAaGCH=8=jjl2g5wGi^2|ATUW(5c z*(E<@_3kqx8I49i*%M8X{UF8Ku^8y|$^P^lKnSa}^h9ROCK2kmIkz|i9K-iF*gJ&( zMDpJu5_DVOkvaEOf@8t8sk7#65K+JH{Aj#K0#5cgi}W8rDwI&r-KQ>(W2I?X4LH=y zY~^0Piy-O?J>XcAmM)_NS=jPZ#d$^J>9O z5$f+dC$xI@y09>k&@uI^hIwtehEDC;q%~`ap#({W&`sZs_Z{U}PXi>|I%WK3Mw0ww z)QMY83$5-uJG6cFs!$i#qo-!}2(Q<5PGu*FXq!JrKN>1`|Du>Dp7?jD8~0?Wi(^+e zc|>Rxb-MMf-w$<dZQdy1evHLVC&YQ|_ISKFUqxbA8T_eGS#9f3SFU?Y8Sj;3&tI@0oPFC%VcR|n z!hqhKg3A${5$qRDPt8~QR6KuM)YJHHx?0~R+F%`03ExHk8=3mvu&FXKGBVx?%|7qt z#TQ=;GiJ;PhaP%pib{)8i)3GS-E|G$coTp4!ym%I2Ok{v+H0?{!wx%ygAO_<+<*W5 zDW$17z>=q7(;B_rY817puTN2DO_~bNX??hU$e5HGe(>Pc!q8Du!#&SW2sb`CHk^Cw z^WoBmM}`rTXQoDGcRo8lk$128m~;aW>SxBjo;oetqZ52OXWqi_(vM)s0G}_Fqw5 zzg72X%P&R%W1{*vfQYIwl{#G_84K|xEmdkR@*(xMtP^+Lj1ok`H8{4vkq^e0XrL%^ zHINih343KTV5*II#6`+jEiA^;@)*eJX;#5CAzd64Uan6{u04{=>osl`a%9Aw#m+AR zX#7BQ+qO@ef2=r$)OXkJyvRB-vJyZE#q`;Q1BZcZs~lw!^aDgbX1xA-_;|l@q2uiM z?8v8X_2i*+{4@r##)xGob9&eZx)!3#zN2xTjDn#J>Et542)SrS3DINDpz8NP#FC*+ z4n0$jA*BxGwISV}ulK4QKV(wJaMz~pA4GGdhaPnGqVz8Sy%lw4e9QUf$=Ij%~{82 zs`*%pxGKB9i+YoYUyzlN^QUlksGesn7FSYjA6_e>TzF-u}8 zD}&P}5z@y(-Q8zKA{&X3>xkd$`tbQ-y^)uOXGV?-lcq;93nJ5ZpB3t4uh13p?V6G5 z_FY5WLw_jF;h64WXe88}Lx?rUmS^WT`g0^_RQxYTmF@AYHRfgxBhP{(&D^gM83IhV zIH@^yf%8`5O|`%rR~%TW>0w-hIBFeFsfIO+b8wvx6?0rIj0ZxP8WG01QMO2*jYB;> zTLuLC{ovJc$`5B&S8`O-NJL@rl-#1<0G+9 
zYij@Oi47f-rHG46ckfh?jc)DR#L0eo+E-Zj)kzE zIBb6S^|1DlD@4OWDm6nLxmL@m$&>x9MX<+4!eKp>Mpe6ji(oT2UXO-?CQv>XeML&v z+BHg9Jh#ZV2pl6s#0}wox9MymYtp8X6UIDJ%%G`;)KcNi}21(BouSZJXjg)o>&A3{FJl?MUSN0G$w(D0RYXc|k_U9vc^9FGJ(S2{Xd0 zQ)Zei%h=)s zDnV8m;0)UmN(ZDlHwH#ILO@O!*^KEDj5!fqr5Lq9sQi|UfZBiA8t#!^HAbjhm-pR^ zEReF%p7$BwwQwBKK&hq39+|6#b0H#{0*W?^uG67GhZpJN^h;TWj1-O8TrVe@d#Z*g zzHxNFI{xe6ZplF~L|`>O^n0nmwS<#HN)7ySB#@jv?@K2*V35CB$QGCs=QCva-GFR! zhNSJ!sz8o8G;8O=WevJy?ce~YMwE*llb#5}UwY+Dvb!XpH=MGyKs?6`E#^WFpi-~j zKqd4Tbz2zFVQwnDN{BLEC(EQ3=Grt5*TA{yfXsthcot+kZPypq0&7`g;G_e`Noz$F zq?#)UqFYnr{eX4F8Yaq3Ps}x(O!RAU8BE@f<4|pVWl&tv)@2eB+(~c=?(QDkAwX~s zjk`MphX4(Y1`if2xO2U|gM-8fcj-Q_;Jwg1)M+88_0EY#Z1MWxZCYb@)*?Zz}ozN2nU#W3O}KHJ)f2DOU851*2Zjm-)zd6 z+}Ln-7f8^AMG8|?8DsvEqK-Eyu5CR|FrpXQkOi@3*74Fsc*w)~hh%F1BHw+{tro_tl^XLnjWZ<@g3vq`*m=Po$n)&-2zVZ`MT%69fk3E%G z8b#9IyEGt{9a0An;)=i1+EKl%(P5hO4ecn=Wx8rBlAceu+ui-N*$-jLz7Ok8OvyM4 zQuDQSnbMptTdZ^x^Eej2+TmyqoBS8jN>#RJa#n6M6AUYtfX>W{VOH$oQyhuvjDj&|Mj99GL1NaM88?8-M4 ziea>4Y&moy{y5Ve&yFRmiWrw7h))Ua zz7sJnQ|Dp(>+3SmFLo*lLNOIWvE{BNtEdjmP5Sa#qQqlfEx zHqcaJ!-t&c=;$P(!O}4jh%^tLA_kW1X>kzVeeM%=^BsY6_bg4-nQL|9yjUo zipa0!H6sG6iR#Ko7TJF8-hTIsF#8+OZnt+S4oB`orfe-F62UM7|H=QEaW!<-Jj9RZ zkf^sqM6e)CP+P4&g9t^WvuAKC4o5VG_+NwW;40GiMRf zA$XwBOoNcm67m%%EUktBP^M_eW59>5&HyIaK=`ky4;t&aA*jda=Rw~na^XA}5bcmQ zx||AX7Ty$bp}Ov@kP`y@)bZ(K?tYWYp8iv(I&v3ts0Lh#^~4(r6vXN>=v4gP$iS<~ zDY?7iM+r!PbW2J^hH$`gsjIRo?Fd|E|Mi^?)p>f~HEWb41GanE6zBMu7%nClX;c!s z$_5cZ%DKve3cH@FI>piICg~Gj{!{W!lRZ{yay(fg_}Tw8^F`qR7D zShh?Qu($YZ#-FTNwD&@u5C(Brnc-Q1t&V94UdE9`FVV9aT0!vebfhqi95JRwrud^v^X z4N&}9WJbylVx+orPaf)nb^9nD)p={SLrG1@ZPWE%_2#^RFgAY2eOTo~*?;|9> zVu)ln;)(m7hTe2GpJrss&LhA!U`_0}&2J)pcll@eYP2JKl;GX?Z%=6n*5n`Rs^Sy= zc4qv8`|2CffV|0r$0F#yX+L?=1BNDO zFE@t*Z9LtvGCf)SY)J_^KK@=dSMv=SROx_W5y#lRan2humRDV?(!B=v+oyATh}~>$ zN_0)DM5DWu)T<8zd@!9Vo}7NoRrfAa`RTkIZRUIT1*^jMsVk1%Xx$h<4&>N_C6mvk z1=1(HRh1!0LcDy3ryuw9w8|2F|hZ6O=!fxapkEy3t zf|PId{CK-q7|MTrK*}aU;;-C|!3laEMKD4#f>n9k$R3a9%nr_m-W|bp1)*UeqU1Sg z1&p#vq|>fb;Pivc3US0YFROC0Uc)?U8aH}^nSPM$QSetp2u(MjB{e$E45#CSG7`e0 zBJ^O=oAcGi^oH$Zv-CxD(jeebWwCb^VM|G}N#PZY$W4o%5bQ*BTxi1Iz;)2?5fPAC zi}OB9ie*VI;%7C7*ptXz96oE|)0aAE#1+?4%eTV!n9XsAs{ey@;J(%UUJtVz*b+=T zvIKR+}Tm#O^>p?e>Tl+VL3OmWz$=A4s)UJ4B>`u}@aLx2D{vB-_Mp3n> zBm&q_OmkJv;D&`bNt47C7xiRFg+|NgNO;H0Q!0#z47>a(C+oO|e)7}X9*gUqzoDr9 zLL%!;lWQ&!`DY`t`Ox274q&QfUo4MFDnM_@LO@0c_uYB!EVT1*X*~>A`Qr%DI0ghW z^Q6DDsG?W4nhDc)G~r-4`b->b_63RvQ6;~Zcpvpp;Jv~xmDg&}e)?;i^*h+s(RMxv zz6saBJY?}mc?*#vCM1F>ffAj%rp-r-v1H{#D&8qgH?b|a$$yGl8ChWeK6+U4EVs2p z)%TU>!t*ZWh;Dt?O|9)@?WNejNc~e zUUh{aeO95hp^9=zqj~E6r9E54(ud8Yyy6G29qll!-5P)X_eJk0a8s{Z-+{2h)9O@i zO>+pPD^T1pbeo|~tValcXIm2+D!(U0>PA&A>^AeJPZ#XTs^{58x_x%&?`^|Z-X~$w zVz>UtM!QC-A9C0qKbvX`Jfr|C4{%gT`r zfsB70JCM5qn33ex0_##>Ow+OBqFqC1VYa`3zRzJ@G~)zH;JqbKK*zgN6#ej5;( zUx+s!wDhq=uA4?fWBo7k9QN%DvZz{m-k^AMVaA!b6-|Kj`jEWk_2bNk)5&zFr^B!8 z_9uJVKl%gmg)dTW=I1HQP~1}t<9uB?RSul{9RX*Q&p^VSe7;CovNs_I;}5?lT1W zsAWWy)-cjTWRazS~}?FI~M_V2(7yz-!6K1a?yY7n_@IJa@$bWr`gJ^KU7 zj>R>*A-Ja(qtLt}3(^M~+CHlGO^Nxnb+K9KFYqY!Ol}Waj@z?PwGjSdD8P{Zmh~n7 zVl<0bS7yH06cNHspF-jtep^O~^xb)CfNxgD>w+@dN4Zy@r0^8aWGf$1zw1!_0c}nG z`-yZsSev2{)djxt0Oi<=2ZWseEFg17DbnF-kW`D{>g%OW6GWF#)^4knG=lf+F2caZ zYS^+~@XO{@*cm%Er9EMTwSDIecJxiN|G1elX+Q#Rb^=NBdPat@X%?NVdm^>KXE|6@ zA9Y82dhgIpkLgh4^Gt}+76RpjXOHQ0Q}Wqy(fY0c#QuJkk{`_@^J3EQq+FwRsjf{= z#|@3xx?1F6Md;)3vKu#LYoT9tOp{ZReHyVydgyzkOVs4Iy|Q;AG9&r2jfXWOI$G(W zl|oqAt6jx;5@u5pCiZ>)CYlvWU0qkp64xUn>gz?&jB{^0%&ofpRN|pX>ut+er(kny0Zd@8+HtAZ)QgkhroDtulomMQ&b zI;zGht_12C`SmOtKQ~b~BQkh76|;CaTiL9X@wrQt@C7^6e{fW|{owZyvz%=f!8%;Y zTnwA`aYcpmaZ{uY9(Zjx$S~f`Ff{qDxK$ 
z4c6aJh>{|=C^bjD^c`}jS4m5GT zpYGncGc-BNh#Sak{WX%6g8hN3Myh#Mr$BvAtXg^$Y<2wuY)8aV0ie8^o}hS=|IZJH zfJO}}*&dF-cwI`@LYUy=+m11}!C)d0oz9Oj0e5qs6JzN+t%%eDLf=nkX(hJrbaS{k zt7he06s$AkO#D*+@u5S+&YoJTiB_XI1Y$DH=5BajTdKIISvE0Kp1bHCg!OndUu@@n z31$swa9Av18tZ>4J&ludm!tvqAc`X4AUay=fB$2i4ODt$KZt-bw2rpJ`O5o5g>JB( z1IeCG#qx<2O7IiX9c$6O)^V&O<%St7`Qmozb1EB^{JKgH zPdon>Ha|myAM6e$Ke#=<&C3?3@WMVN{laJKe8VV8Lg z&*T?*M1-`uw9FK*+BKkipVT4=uLVerx#SB`n$CYrI6$P*vp6d)Z?=Z`)pK&_Qt-tu z)LL0C)Zr_pa!}jW4D%GX1OSi3OmetThhn!_Z+BDbq?pdb3^}mK@}dDA6Bom#NAMkq zeEj3tAS-J!C^l7q*7v!2b@fzrQoQ3fulwKKYK~t8_jjm+f{a{ALh7=yb>VZ*^JJn% z-f-Bj(j);pfk}U~NT&P#pqn&;h0s}sOd0@zj^Mbf7!5`_WWzU8N6 z;)gJ($LVnxujZ$@D-mv0S5~y+CJQceF=DRuOM6u^Bf_zy^@$F>mXme384Q~%{noa; zK<6z)0-&>hHKi*x% z@Vj&^HMt}laM7mvcbqIqn)ELVV)cJOQ{u+R^xi98y;+Y9SmZZdYIHIIuCf73^|1(l zd$arDUA=b;N7vj(A6aq>IfaSkYZOyc!*tOAe`i$Y{yAcgC8t#c(VNai0np(ShvnGM z%0IaNQDXb{rD2Bm+4#C1$9M}`=Hc#_r!7ehhT%4uTvlp3<25o=BFqkcUc4us-<{}Y z=9{ni(WCEmAo-&xj9C(t=n~bu-j}-W^~d>o&+pAe{A9X~96~d{+afJZ%#24DgafaO z2{ZB5A>dx`2fL~C&E0IriY+2xP?}0+D@VjVYz46P#MP6dRi}Atf&*l;B3BE2zY@Q`%-~{9K7+!P$IP>cRKQa<32G1G= z_%*JE5p@Cl>~t=!Dma+cZ&zV8A^QO|XVxQCQYC#dOts^MpfVR zjokBYw=n$a@xBnT^E-ab+0d-6P1y-B&Z4&QZ#KL2v0ugcsAz{2#1>4;;Q>TjVK3Pj zGZ+;0HL8f4*1|~Lbmq6tss!2HX9`7|@+R}{7J@&Kt`l;!p_v{l{KVcnIjJD6K!fwq zfG18NU>xi!t>|q(Hsru8CjQGV9EGW=S*X(-kmXHs>dp6c&a}SaXRN6qH~lMZ81q7p z{m)86Oa<|Nal8gLN6j`n<9RaOg2QmR zjI9^@F3lr~Y9&lL0P+i9F1RcUF{sEzB*H#l_5nei*L*UkQ8VhEc~5$j`W zhcVGf2>b@E$Y$#`TVL)Eyt&TyX3F$xI6ms9csqgWn+#HDRMhs<7dMz5BXeqU?Qv%H zv%S>C7OduPeiw)lMC(>3n>Rw_sJVyB*g@*uetY&TcGU&^foX3B;D@f84A07n?0si4 z#Co}}_gC6{qGmQd#7V>7FfXqF<#>QCe`!ncKMRMvr3dhK|FvFT-3v$ZKZ^@Xv)R?D zjs*y?9XwC};;H+k3m0h_`CW#o;Qg%`6(Krm-Y@?$2~r5f@cjp;V8RyOQ@Y<%J)8h@{PuJu|bZ$aG(zzu29$XqqLPSD&8SQ#HJZGf& zSPxtDXrK=IZRCHuX%$t2P4@5)VNw{*j~3zmmMGPd>I&0n>o|&+^(19 z_IPoA)N-usc0m}Zd%a`DJftuK-Lo;$-M>2f#tr3*Q&sY?o7NRXm^3@HA!3A0`ULq_ zSf0->cAj&iA5HW9*k7>+I<^@AT(Z@EZ!m^P%F0zqe4LCK{R4A%JjsX4esv4J?uFP zG|=>P-SJ$&E>UZGA8Y1m4`{d~3MLbLO!xErCV!t=bCDJSagb ztLfe80AmH}Gw{vHOO6bdrj7WGTI=$dZfND-TfmMr!=9gb`7N!le|8*H4^BM% z%i^9mSBwc>Mo80rPJff3bal&mJxQw)@s3vC)Fs#_hgbCZb`&R&8-d4shX;X8T7`cz z@#0}f%5{L#*ilOq<;)s<;k!%Gy%a!@ALZi8t@QvzSDBXY|HF6lX@5a^D&!WoBKP5=H`N&;m&pT z(?uZ_?Tn_6v_t#t7Gs7`E1u^Q?~UBNKe@fP{Uv{6+5gB4lt{Ek9^n2PetyFaS7wxn!;;KVVK6D_YOw zyLr>lopOta0ZD9I^0Q(UKL%kNQ**`ju9=gVUw?5_ zgdpXN*rNl0aCegL@)*{E-EY~=yVCOlH4Ffk(=e6K>1%U%W1J>BFxPh=0*jKcy~_8~ z(V)cOn{|3PIp10;^eH}4fcSNV39kNrd}gi=N`H|Gogc7_XJFCQ@Rb}oGVuWGUj zYTF-Cgc$HJ$bn5w%x+?bf#=o|`{Fb05nIC)EW6 zbryucj?cMh={renZX~Fb``%4v8kHR(%E0R&CeNGT_cs zfBWZN?Slw5h|F&1|nZOIo-h%Lk|AZkg2a=_zk={&;!aod~>q74t*FYz0u`g+KiQ8xEsFM>R6=}M% zF3g6u^g^Q6@%i!E;Y&&yisZIbyW~5A^KY-=)V`BGmWJMAp-~!9(AHy-`zT1budqU6 zFWdzfCRpUD2linYq7RPU4J&U6lAOqg=V}bIit~=&a?rc!4m324yCfX0l7ec#NbFT2 zLM&6;YRohmM?W~}(Vn;7{;aam>Y~aUIKLb+8yJ&;IAD>Rel%F{HW~WO%)zJ2uyau; za#_sPA%mTA@p`C_g8&+JK)qjzO(US%QeQ6#wXdYn?H)GT;yNc;H23<(!f0A){zGrL zl%j#6+O?jLqV)i^KF!8r{{VbhQwmuY3j0eK5D6j2l9m8#0sk(tp zBOaBkr5@zeV_X}NXe=v_;Vt;)P#~WuHKX8+hdU#?Q!`Uj>(`Wb2Vw8!cgAvAP>NFn z7p>^^up@`G86gw4T@X+5^vTe)kOKL#2h6W~w=g?v4?jjv9qzS@KfHt+1L^J8gXuL> zvU_a~)R>fG*>u69#&yCnMkCD=eVc6l35W4gzpVn6FSE)`7~Yfmo%F9zkFYb4CN5y; zsAR?80dIGy=Jv5oOSbHbksY{1nOu@#z=uVQHa~8!?xbnduYCTH+6bkWXTCIxY|yJH zneH}=^tC)`ehOn>9C`piNb~xd-gajkW&Ki2o!A+y@uiI>5#%ZGPdWd+eZU`Y+Q;jW zPNwjY3x`(O_N>mvs=Ik^4nK+2rJVb&Q1sENaVIyb3!t?BbIBaLkZAK^E+y<6rPtv+ zxSLLT0`kihhU!BegErt_6`-2tV?^xY#P7;qBnH_0%Il|vT0GK9wcaMK|Iyb+j?LLq$_xzk^AY&c6ya{U&(>=8dE;XJVM5#{InjXA>0*g zn(|&$rMDv%)B|*y>WmF8g7(}w?kwB%wEuM3eqc&PQN!g?r7I!f7#zAKVFm56JC^N* 
zNPZ#^+S4|`>x%rR8Ec~cr^z7QLfbI*xf%UftNT)~Rgw==+f9)+%kyg^AfSic*r|6m zWx(7#le9B@o#a0XLers%q3gh@vrmj;WyII3*t3|K<3zaL*CrGWLP8sHJ6MAC@#pW~rag`?v# zWSJS(2*i$`C>C9R7hgWMiNB2^W{)TGG4Q*bKIXHCU*Gd;@;%y^!^clq;D_?v*-8cJ zp7r(?`Hm}vNE~C#OOQS8$aE7}WI(KW2zED#H3V^2Y;2Y7tsG-Fa^2hY46l`0+N|x6 zD;B&;*!leno4lnu*Lq)D_vO6x(hO%}R31(Lz%E#byUF)u)FZ2*>ck%@poa0CtmNfm zdQE7|E#NHM82dody=UT+9KGhK3GM`)6N+!>Dz@0wJZ!1}ZDFCM3X-8oNLlfu7eOcm zmfBB+kHu>FgnFM9S`I8C73+qIzE7FPEq&d=oz3ZW!g zq2FrRtb+@=3}Q(+L(Wa5Xvvb}hB7>sy9=Zj;__-$$kO$7qEl^Iv9YEXTKB!4Ga!tX zPnw}pRIg0_HnCEq3~BJ6ULe}UPX)r#>yAP<1s9q98u(ZxJ5zHI7qw=|(Hbty*#_@& zZcohr`5sm{vhrSAm>w=Bi1KShoU36%cKHT0%{y-IqUIV!`dg@EbLCMbVm*4%6LYiI zw1yUSo?7DESFaKNZ7Mj0SqJGbq<;1i;Q*!A(hTbxKwzt~_h2O?84{h-T<@%h6qRKe zbkws~)wUEYh277ST+q4r-jNBV=9EKBFVS@|k>+lG)Ssu|sq~(M2UXdWR*fksv+tbj z`9?;!HAPxWel#dJu=OKvy|T$?O6x2PR<7>R!Q>C9B3SsOhk((f#5$8e=g%Jxb=Udn zy9FBI)%TPX2ZFe@uy7<8So8+wzK+n^g`UO4y=S4t5z8=H>q|v^e}4 zkt>y9J8(gZAcn_52XDyWAL&xfXqjqmD<~OG_Is^U6;ar_OMK)>(I7A0q{8|4OOR9$ zcYYN+gOO_z&K&W^yi1nO=jiDLr zo4L)$yXtgb{)pnc$$)X@&ALm+#Tci+xi~kq-z?Ug^Xf^I)HbED{D#t9>dkzwn`k3M zIHsqpk@nI&N-ygso#O4mFIHtGc^?(!T|rkwiZGf;0wRvvlE4t=8a62~l}b%s!P#t< zGGFQ#xt%fZ8-QAZwTrL%rGxBS4(k)$yG_m6>*q>VIU=8L`b3r8zUM|I8lhEK^1L(u zsheS&GCh?*wEk1^`HD}GA^Poe(oC^f*eHP}sz0f9OgocW^cDc~obL)Mq2UnZ^?XGb zaXYB$qcK%_On>((^7p&K(0(j2Bf?L}`HJM&H0Go0C*8mluYAkqwjil{2({@T`zn93 z^x|y$Rh^`LIw3KZ+8<$B)Aa>Dvi65fR@%b16^uIPzwvQ42CtzhKA3;SpRfux-$ASv z^VpRQQr;cBR4&SoUg#6ZWi|P5j>s}#OQu8@ALFlYAvSOFe>5ylt?IJOQI};9#uhdm zD%$?AylG2EIp zuAb6wVZtPk8g>KlLQm4HsBO57s~@IdIf0?3!A=@=NP^hs?&d*u)nAeKEPdDC(XYbH zLlNtm6hDWx3#Y0m9hGxZr|iE9V4w1 zPWrXtO(L1qd3wR?uzaLG$#p_ZtDY#x<{ei!+iGLhL;}*LFjJY7I%v|9zcBP$&Q=Dl zMDC<^bF&R-hto0{*Hh2{&l%(!9rzB4Pfq+~p3!WNjBmMi zU$D(?+{TkrEzI3!*|->(y><4&KEN(7PORCpVEHPEHxTyCwZCx!$JD@G-fqM=nsZ?( z&xQu&X#5`4>1=fq)~+rroa{^^K0_?lE1&5Y0~!rjM(9g)M((L0%*sr$XsF$)|Mlro z=!-C3%0_WUxR8wZ*1hYJ&|V88;ZllE5&XQ1=nUd$8f75*KxK))zw4CH&SuL|hwJ+~ zsxc*JB^FHkJH?bzB6*EL2?f%qzky7N`-=Rqst*IFI;3=m45PhujWp-+*Ft^Hey0xm zQ8pGRsU1SFH(aM8Kso9eor*HxJ8 z9fDdS|8@nas{dZ5Rwe~jzR8gPURD30wf+YL1U})AzmPlsy=cT=NT2^+N+F8X|1VZp zvGJ1mbpgq3R{eKR+Bv~_|KHcJfPij6MwWwE``BqfT=yFAzz&luFiwjRTTfb`NLR6@n0}Tn2PTjj+LmYPbrYjlc9O6cv;+``q~@K17N@KBia>C7MIlo z5v(8T$;zgc9^Q6fwy*K@2!JGQt9tMF?4N;8p(Gwl?1xHiy@WRTmpc#{Qn~B^Ajd|A zJsx?$su51sP5|IanAmMf)ubl^Yu91>7sA$6gI-6L-_?BO?N)kW)6X8@xiS5L*AW6x z6&0(Q(EzPM3Ff2dFBLvK+zl9F?+ei~^eR}Lj04>PHWOnzJHUTou{Gp24c%{pl1$FI zmCrE?OeWgCS^@0$7Qi5BIe#3X*^d8qLv%{Q0g`>ec)I~$9LCYaIoHPPx-)hHpswww zde2f_QV5v79oYaxa2HL^Z9P?VL6qGF5CD->g4Y1f+I_Ox-3^Gll1FCo1zdOH!1q(? 
zDxsr(M=q-wE+cY%PRb~d{gc%7mM4yCcDKE$7Xuo~ZrYo_t~cZh^BOG5k{aIj-*1-z z6E{J{Q>wlO*#fm}%=9*a2T7`o&etzw+*&?4E2uItvOj?~{DrOS$6s%KU_nS&cU-wy z>gZ=BICnUc;4O(&N`}^r`dR?Al-yJ4X6z88gErfjqyMXEPw-MR8OR)fo6v!j&BOVV zQ=;F#9t?Tb(8rE~=Lk(-Fw?P&`ab~C`ogYsxC!BUZ6PMgrP2I2 z>HL%Q;9nrkPukc3kYE&}`-NF-os))HAO~DIDSPuDs14AbVE?CBw~s79NI&55vY))i z#5(h13TVIr1b*<0{hidTHo)SQXG$#9Zo^Voa}iyB$Lz)3abJcfd;$zJARz>}9)T2> z3Db^rUI#;gc38^Uu~|Ev=pfXy!*aRV?WPN_JXn?=osKeg`IJ64r6V^l# zS&l!`)_pE^ATx9>EXyZ4rY4to`=;qx9&*mpm#5Q1?du=D6GK0Vpyuk(*8o8iHE@Cd7O}{$Aun z)H-+{UX+Ci(PRK}xa*LgEOvyyG1ByK-Am-4xMBPf5jK8(|M(IDIfIGm#&7nGo(Xvt zZEGq4QxocQ+{T*$AWm}j29y+kcJ>w<9GdH~qZuIJ7Y;${RX?krBXNG8*AAS4W9&ut zkRU?dNZmv*aNh(BP~zDEUNqn3XqEnH+lHfYu`}D4ykmoASF6T!p!gEK9-Bx`GR>|tnZm`j+jmbY1xnVW z=Q7MaxfF|N%PduPj8)Q>nIzjc4HN@ov)n>HwyW+G6@8 zjjT1)fwj^o{)n4-GkyhFt6vgeP=g~|J^{rGCpobXxQSbe-Sz2D#P}jKTQ3#yST?<0 z+;IrS0nrZtXe20wBFW^b!VE8z3uTj#i;27TlxHT*MHm^i)WWbJp9cio6S>ZSdbA+Y zNWjFj@>QnWj81=qIJ_hV1OWwW9SjMj%Lc9lz3^P!t)g|4OCxR+vW0ky`ynA%cZrh{ zjo;A@5&0}==!;Jfr>2l&R^00mTxThG0$y46ZbEuyPUbje=VQ(h!mxqj!Yo1{_6xeu z;{bam8($>ofHGSTTF*!NWoZ{t36;sgRDHA_nF4O|^LXtlGf-AG`yJ{+j7-Yz2-G~q zURiGZr(9q_EWu}LNxu3O@BREJS4G&{YbAu;*M6^>4+H`L05M!8;BK=2v&4u&_sb_q zU41y=pX!Kism@3K1SS`Q*DM_mXO)92EmRgEE^MR(-w6n)F}3$QEBr4@^YMUb+Zc}K zP6IF4ccjq0vY($H1=0 zU!co|d@Zxz+HpL?mM21Hiflg#9&<-G6z@nRBwXh_DgXj`qex6ms;-|yO%Y6R!66ED zilmGc;@Eu!7oktylOYMa9q~{@Bk}uGErlle@d_63?1~ockDkr3#z?r@h{)RaA(v(C z;~QpVZM3t4&`K>FU4c&l_@6UW3!+8NUla)4 zaeTP52XOH}OH6rd|49(OD$N0!OEsf60SO*1%mC;Q_5RAzV3=Cd&+b>H>?Gp!wyK5L zLhj>!?D_lxRIG%6CVfaZ;e~mgq^Ca#46#NL)$Kk4%jY&iNnig8d7s=UL8KEGx+qY$`%n z!u1n~Ml-slFH%rboTEKU4p{;lQ$Ec|_2WxZO!#;Ik->!GnMRVvvxYRL-Ei3Nd@klR z+#uqI80y7L$N?Snbi@s^Tm)&>TKUG>)OsIh2b~C8;I|8H5cw0)*&WfMIfs4YT(SH{ zBGkw@HANi3FNaOJ+O%zTiGo%(cnEofD9H*6-xtl_fSXZCtcS?|C$%CPzRVebd`Sm_r2xjQAySJ4J5o2N2RLMJZP6@hR4~K1csN z(;&R5I}zddOwws~(u#wBi;K9T*&$;sdDJTTmq0)F4IjMr+u%%r%K*;Y?f+?9%y9tKZ((z1kbYpd<0M9U|b_nF>P(H}6fTD%pXlXM66 zS%-Dz-@b{l<@A&+o3cLJ;beodI1kN93zTkZmy{fUv8i?5LG%M&;tl%Y#Lke$WN$I^ z{`Wf6;S===>#`uC-IOHNtbIUJAxl722jL3ozRoXKfUyK-G-6~!{O-^ZmAI|ZSRG?F zx!GE+D{@>LHDIic9An&I&O4^NiWMQ#&9?}sO|*hr4--gITXA+S#j-UlFvQ@dnO21- zdCzS9#oHjb!rx@f9j2rKf@4Orjsx_C`Lzz)Nz)r1<)%(H-Bo>YSGOauJK{`_=Q`mmr#9C9i6T;mU zFnqk}dn~Xc_7Ziy=;?U>$gp}~lPdjm45#5^6a-s{Cz*UD#6$%hm??L~6o$VU1HWLYr(+ zMm|Eq|4RM6p!-x(pu)MwolWo7YcWDu{cC2iNfM1O)K_MK2TtaWKT|zcMg{uS?k&cy zw1}iYKC1k&yU4hp>1N82mT*uox+Q24###(&U+3gw+n`3U%vJc!My z#Q7;%c^q3dvI%y&!#ZK_pAsX6QPH^e{XJ5x=9gd`ZNCyjXwYEz zadXD|<%WbW8MM9&W(U_M8$<{8t%MAsl++WFv#-hzOBi}_KEhYAk#lAgj8VKswTDOk z`jZETOIbV~f)7FNNjx3>5&yjf;Ol9>c8^&IrQS?;=*V&a%9uj^?p8Nt%4Lizdvd+Io!`lM8F2;}ZYbJ9W$-lPH=9F+Wq# z!sp60L8U}z^p>fg5>Jrn(UBCV345lfr|^v?_3kgq70ewNk>bg$Al#GUM*$heS%A7O zDX8{_0*5B`nt!#&mXBeaDXR1fWJ)JX;l{wp;A|(rETbvQ#<(L^(lH(my*ukBOg7R+ zns|#hn~N=e6Mtmf((U=7$PIr-mDkf+5lApmx=Lz+Nl>dfCN-i;_3+|Enk9chPz45F zOD6Z{dg)s?)cw_GX0SBKp^OmZm%`tH*d%~$4Dt$R7_)CP=}1$r_0S4H#^Yi(aH;Oh zGhEwOfHLJ8#{fFAf1J|d`mz?w;l+*8_1svz&o^`#LY zza+IQ@$3g9N$q@tN1kmB0%4#R`u|>*{D79&6l+ded^xRG5dhnhC!zuI)e2FZb+HeJ zWWJq(K~iqkAlCm8Wa-jy^aHb4{%cuMymW^9zm`%E<`dc48;(npZy)wv0ZUFwS+ZQ* HIOu-?HI?Uw literal 0 HcmV?d00001 diff --git a/papers/atharva_rasane/00_myst_template/Distribution_of_P-value.png b/papers/atharva_rasane/00_myst_template/Distribution_of_P-value.png new file mode 100644 index 0000000000000000000000000000000000000000..bdc655d742497b1547446632f5073bce9fc4551c GIT binary patch literal 65685 zcmeFZc{tSl-#)BMT_ZB-Dy@UmRf;SXF_sb9C9+LflS;^7#?GKD)1oM$Y|~`PI!KIN z2xS||7-bmyFqXk!?0)Z0b$!40eLT-`{O3V@_L=G^E_YgM+R5U z3k&WQi?`1ta| zg;s1ffuA?KT)2+oD+>L!T@JIQ 
z_dQ8=C|8KcGf3v9qeC^os5TRY{nys4c^%4PD&H>9^?9Mz}tnP;%Z#bT=q)AV8$FB+v@zG`L`D_1gd zp!R0oVDzw*c(`M8tdb9}+f&JN(jwck)HK_i((&;=zx&|x^P!FmQT5qYoj%XKigzRC zyFHnMW_*0PGW%ch^YN*5XP9IvI`^o{nteD@d`{B%qP{-b)HHf#W~RN+$=rRURcq6x zP3xjXVG^2Fp55*-Ypa}qu~ihR^op@!yWlT6d>LTQy{E^$3; zm$l#3=PFb z+qd^VZ1(fP@FNEg9*oz=!O1I43zW5G=DPR;>f^_PbK~EQ?BZQRE2f`12Rkw%_ME(P zwdvF$y*`*IIS7^`t|EC*<)PKh2libfQk~g z(YIFRJ|V2k<11wgl|dXomYVAN3OCYm1K&q-;JgVKn{W7;FH(}N*f-d|kX`KlC-NQi zig_hVkMFlj*cq9#nbxb9cs;((X)3tg1AG}M!U4Fkg(M|U;f_&8W@drgsG-K%!hp|M zHs!;;@9zhr>s-A$Q}y;0-1~l__^|5G&=BbowJ(_yS(6lsP*fFRY$M%hztbF>QZMd% z*|}^w6pkE>n2PsXUuM#;o);_llKYi3cqqo=Z9WyHrf%mSFq#s>hG{Nnd9@@4tSyS% zTYB9o5|9&gS~D}JY^LE)4(>5!vABF?rqO+Ul}E7K#mBe*(5iC53(f{UJ`X|k_G0!I zQGK`qW667Ex_;~<|JE&9as2scYrIKL`FyOPkWc|D(U-jI7*arOJhw*sW52`S=$%TT z{IY1xZntlGBnPr}r9bIrM&A4nNBIQ5nJ=pIowiX#qC^MH*?P4trXsllzdRW+Z^GQg ztYNOrQ~K1(iuyBA%s5G%&|n;6PgUQ%qW8=#4X{A^DC=MJ{r@62W>-}V>ut)ntqc3w zo^DhC#+M*Mjk*gHb2CT_TOLUY*G@@dmcDN`aS=vNWoBfw>B^VyC|p)kp3gdFU7;S_ z?4Me(M9X2nvVCUn=H4H)6}20Mo2r62ehl33rGSs`2Ksag*qL`vFi!bYZ((Q?RP&7W`<&hkUSAMC}BIlsG-P~FB@^qhHxh1-cK zEd4-uV6vhlbkW)3sHL@#)d_8ZVE1 z{md$@@$v8ZHP>9$D(XUO!B?Q4gT6xNs9_W~6P*L6VaPMs+#$YCQCJP1S%HBNWU%YC z{ydgN_h3YeWZ1e#*XUGVO^p{OU0MmI4A(msE}l)d4$v^Z=zWt<-z)cK?PHO_S5gmM zd`VJIrtBSxy!T=$KD;SAMZR+7eIi(s zE>7^TD;CKWbm}7>vy-<}R6*95@6_34;iBag>W7C*ua+)#u07M)s-gIy_S7KrGp~`? zS{Xz%VK#=+!i9Do6QB|}?}+yLs6N~rmxY_c_1kq&Ly58hJ-jTNC}ybi&`Q%HYEj)% z$&45vq-m|MG54~>eQYN=8$}34!6-dTyZt%RSGl-~L!`rC)=&Z<-F>_gBRGV3S?rVE z*SiS6hM4k7;LUA$+fy$Y5)?*7Eg!o))0 z$lW**md9D$5G*{xLU!m)Q(p`}GKc3`+D2uK_kN+XGbqH>aXZODoXY5_o+U&B14Q<| ze9R;F3xjX!>7#+Ik_mRn?ss1bVi5OK1$-9z2sNuX_iNHcGpLH4%%JrZmS$fO%9_LS z9f>wTC)0?@AG3)Se-gq@+O=${dKaLR!!2@z$E++bSUJhP!bQE+WbOaLYvp}rPV|0b z5(N*hnlF|x3fa9aybhbh8kO*OEQQIbnyp(&s>aj|efG|I|7hK7;sJ(p`YkbcCDYkD-0%IZVxs-WB!!9)`S5`Urltn_R)gS!u?1 z8yw+*C=zO0x|h>vy@tIO(;wK}g(;uwaKXccW#^bRII703KP+LM>EFFpS5eWN94aYK zwh(N)v8na`KllRg)Z}OQ98-Yph6f^0PGOnvEH=%8+=SW)hvy*v#xHj<;j-`7vg~Bs zMfy%RM_ww*_`Km2bG?68Wvj3#npo1>q=rR zD#u(!`dw;I2KzA72|fXfjn>*Xh6C*#7gp4(?(^#rbm1@kvMWL}b1GCVB5N9jU9Syf zs8F|zy=mN#7}U&dQIltE)em#&a-xJxeYP>ilmEnh9tvc=G`g~x*U|S}CnsQyOO#~H zPAk{)+AFE+%K_{1!Fm`m6pw+^QA_VSBWB?ElzaV|ZaWpV*Oi)mEDF%#~r~ zxo(cXa+p2KZggwFf{uWn*kxzu3R6M>{FcV{Txv5lQL27niC!r_8N*oxm_%$+tA@FU zk8gZ;FqV(+&3BjHe!hk%b@=HXTaDz_Zg|Wr{o@NMPA9`yEsOsk?1(rAGtx?uG@d>d53e%J&fPcATc4c>5BFag zkRbF@S$(OS2nW&y3$pvTxU=mWUlkD-psfw~`miTzdF-^gpE5BLr> z$PsQPDpuR1>ZqY}7JrQ4T1O(+bM_bMJa=Fbt-VFp-d(u>(HIKwCmN|``OH#LFog8^Ok(~HYo!_kszj=-+L!x;W-TjZcy z@37!$^iI`acNf;cqH1}vzZ#L4qf~&wA>>M=o2$}fc8Y1QO@{|;k^QjdpM#qltOGej zUt=Bi)l-QzT9kcirx~%# zY)NSMRi^NtpnNKs=%6`bM%}#Fj@)9kmF&s|bwm2#an{N*Qo-#INk>*5j?}1nSR)U6 zS)sXh1wC9m@gpC$b>FGW%H3{}Yan0~<{7n5b`hjC2rs+hD(PAzYM04#jFEdO*1xBTA%ij8s8KF~r@R`bi4~D7>(_Z$8HZ)nk#2wYN z>u{~THB1)A{Z0NM(d%=_R@_tp3FlD|6(mBj$809NuquQFyyLu+q3kt{U5q!1#3a}C zX3Cq8IwEqu@;(#4TYMZ4X|w@%H!iq|N6Eup_fY!`rA4}Sq7wNHDf>&K+Oy*^qh)JL zV>(arbyQ*R$hmHB%mb!z1ANW&p?MPm2HvJlP9*B1Q;@f2PsRu;;*Sx?iA4kVVd4VR zH&Xkj?^XEA8ByPc=cmvq>8zZ-{63tk`y1+aQ9tp@*8J)YYd>ePK-Jyd7QMoJe1%)r ztSz7oQ}niR0uLASnPZDqzkTYeFnH-uc+5~nGE_N8o^g%qvtD1KD|oz?deF%@vvCfY z)fszQuC}TzG`WMWIhp!koAr9#GupLuj_Zwl(i+3|pey zFzaqzm#|J_P{I2Mg4-Slb)4AQP@fViM3M_`O9RVm$3GgS_=o2 zw>z_Hw?}$qq1KhZx=be@LCF)6z(TJ-2-Snlp*12~-*7HTp(UNtE@eu>P;g82#$ ziRagspfg|h8>#&(Pw#rl0zxV zXwkBg-i>R_=}H#c>8rg3T~WNL-S&fbQ?c>w5~MM^+pyY5oaR!@&SkF^*~JZH$Ohzu z^Qwj9gH79ZR7#-yyabxch%q2kKd+G?=Gd*fjoCd8@Py@AAp|EY*VoK8-@m ztif$Z^+K?TZ>qH7Y7{hype{TouoqPL%qhRrYC~&czxJrU zqQsQ!ZRv}v*+f8uqn6Szr4y&hSLeHIZr+D#&W&f{9IV}2hc1e#`^RL)FdmQW)RCys zdR6lmb#gRUiqkc7q5laDj8E#uvBG* 
z7vDbdCI~KAUkXs~?07ykbo#6MWal@1O|b`unK;-@vT;`rA{xT{eVuDh)q98mxb;8R z{9z$*sc_`*D}|oxHDf)`{p44}+O5@(2L~1GVB!d$Pyy?x3TEO;70o%lw*?XAZB4if zS6yB9onH0O%_N2!2U3vaJ6xXkDF#8@=ZN}N|5SYC$~`4ND>1$$-ad6Nm)%A!>l%x% zXYS?ulY2n5uTYn;OOrRpuK^f@0TC!|9Ig?)4!1!xsW0>fNxDpB!sdwlU=5UazEA2WCk6F-2w&urA{WR??&TTVN~W@ z3yqS&Jph5N> z{F|R2K|U_Fpq&SCD_cl0{1DS{jB%0z%9p{&KAiO`8@*3y_`0o%fV<4h(RcBGNq;cZ z%HV|CZFavf7C2VMZ4u(-`nS?{6qPjDkU07oG`j7c)IU0QpxeDAh?P0i4I_ZoLLByW z5lz#%W6g~2bj&I5FsVS^;FizivzZLpwly$IS>BrHhyG*cOhKmt|4{YquB+>EWh$+@ zdN(0rqHLDgeyIX%&(5QWjYTDmPl6=TV=7zXlpIa(PP?T<2T6u+@@%^(wMy^2BOci- zn*cv6S33UQVD1jwJNnr5zqGefPA_$m|Cm|0PPf4630y=7A{n@ceV2})ql#lc-%o^1 zc#6KPIROhr$#XnAuG@uVybHB(9~5aexlCR=Awi`N<$WbrEU{aCMnK^dR+K`see50o zUI&i+d!yq~csFBhB>G$c2P)6KmrBJBFU6Lx+rLZL*;3nOQ8naZx3;p_caj{8y)fWP zE$cA}tat|xP?yQp9H^~|50WPwhgbRK_=zQ`x(nJ)Tl$QqjBN@X{@B7&zT5gnSW|*E2Yma- zkkg;G(}i*hqp6Ki4=b2!-G0uNQKsvDcz^$jCZ`hrI!41dvu>bqv3NFrGPjv`pf<{F zEx~A@{I!f4$d1YW(SA8%+P)Ijkwkl@1oiJYhez|}v)8a|YBczy!KVq1J& zm2Rx!z}%>v%bH)ejnX;@m9j(Z1L!30FvAZXQ0vE2h;tjhA z%GyVnWeA7j1bLIy0Z;*jbR90YUTu)!a0V8XXp^I-zw{BppT|D@NFr)>MNmpN(Yp*u z3H2j0$MNE*=$c)efw>Ae@rn7})gMzOe&Q8}IUFv})o5TOrA~s&OKz!MPM%D$UZrI6 z;r(DrYnR!$^uW)4dKFdyEzGDA4+%PvJEy!Q2jQ{gherv^jI4;k590MIhGk!?d}Ohy zi$3@q{FSvU>%%Dj)#$6bctZGuh`ml_eW*&{j2*sgLKL;=zgU6vnw$1(_OIA`Nkb^z zcifJ8KW^80M+GK8Zf;*(4Vvq-zVj$5I(%SY@%5!{8785fx1daxn2jY0SyyVtIT+2^ zts*&NoX!_aAwIr)YX7E%!;XR>g|7b_e+<<`&~^F~L?S>};KSO#0Hc>D3K&NdBcr6* z+1bIpzn&T=BMNEK0d8{T3cL^SQ{eiDX%NLq_)Xmp-6}!;nsnYVTw`9qDQlnh5sL67 zU0t0cJa%Rdd-NUAR7>lATiYu&06oBU!_)ZcRgxVMZwhFt@}GI(+Q}0qUYOI*gBLb5 zG$`Kd%#HAShCmuFE-wBvD5XaU*n{W71jM4^goZ9e{Jaww9|*Yryte&JbI`*Bb+52p zfyaLhVJCLl%i@{j!vn31PdD>&@5%A(n_gkT00jQx@&lW3yTRayM#sj+PARv>t3>Dm zZv4)j)c5b-pNd@Btv_m10NC!IE(yQCx&of7M zZTmgvr&^u|={!FBWM>G^+uIvZ=E9=q$?AW^@@tmDBO4nVO#r3S;XcVgs#ArDw%WxH zp)>-nTvzf7+`~MVKJ5bgZ|49b-_X)>I6^0XV4-P(URzshR_;@v>M@?SQ`Rgvu(+Zw z82k4H=yiNI2rd!N%@B3ouBZF<2Zf{{W&JT4(eYiTvP$+}3V%uf;IGu;GY8TlbZ~{` zM4NxF&VR=D--|Am^k}zgeo$9tU`fd#965pWcy$0wmigM=_pbHoZ zOQ0-R#DI!Fw~A*8$rYXUWVjMcv##Y?bru35zct$D`|TL}&k>TUqsfu8KnSr0%ewPV z9rf{R|BRL1!%R9T6jJhARCOWddoC)+?<|hhLVLLFptb^i17U%%aoS~v|>lg)C-F00+oe1RMwBQLM7ues_bFexJTdnp7SPSz)) zB-L@|Y>1hM79pnF%&{ZuYu5Gr>Q0xV8018EDWwHKJdvikk*^0g`VhoCUJX-;IaJMc z^|13SgGqqaOu%-iVUo$*JVa-KQ>_fRj^e*=&lkRxa_h@q!k`i+`psN?)!Wx$?o0y|@DA-_hzJh$=*h_w zbxQn7GjRg}O}O57_UrkY{)^&?K!j6BjW@sb?c29V;>Ty_ehYuC8;hdcK=h1Nc21lJ z9Jj$24^WJ;A@*|niDc^~Bl!0MWF9U60e0haSC72q++!Lp&B2O0?PC{Ces4ILx{Tmx%Q=g61-tXDxT(+%H zQuUvOyKQ<%`soy z>6W|da(uxS*D>DDAjLH0TT*ige%_5&duD5Al)_hjk9e;r zt!5Ew?vmOres_;Q`3({U6OVyusAyt`0MtaBHBp5slpZ-yFkZNra(2QW>GVQkax_Fqbw^QW4pR-D^Y^zNp+MSF z&>h*d%i>K_Q`2Dd1du8(>Bh*>Kp~w8BD$&XWbr$ouD>w%)ade^Hv!n}^pTh|zv4+} z;L{Ln6wOuE%A72Lnj!5ZA2qTv4L#9DQFr^182{Lq_NgHCgCSF_j8Nh4rC%WP`3lAf zVV#rz0I+$E6F&+vva+^}<>%+qK%|FYM6hXGBixj1kq%<>$ucYa9E}ZjM$*imFsyv} z>5ocr>+4}yipvGdAUxN1yG=AV=54-2Ea;ANUj4ameW9Q&nP+BF=?%8sqpl0P#n$ z%V(Omnsh`>;Xja3P0#08YLlD1y_)mIC-&y~nVw8MTE1f8rU1=n;P!>!%#rk38L{EEl%y(d|wj1=M3J$Sld4fH5u^j&*dSTY8Q{ZU}&aGS`v>4)n#3GEyKX?nBCwHRyvVt=6iu=DE0g`&LS{OBAjv5f6@)WmFpg#We5#b#&i z+W6uq+6Gc(y{LtMIWQztBNvt}hgg>s%=7O&h0G+7O5Yb>vE0pJOKC1%R8c1CWXSI7 z3jK|vZtPP@_5oV6l5O1C2zXpc~77l^qx+z|#h|=RDv(Y8-UX`d< z58d`_F}`D{aaBA5wm5Ib_S>mMY4kDqWIU@2ivxGFnMAx~$u@G9j^iRc`}4oSuYGa zXGgz9#l>Zwq^h4ZF7>u3^qBY&;xt#h)NK2s)Nlf%E4sY@w2&qC%OOxr2u{2=uO3K1 zhH=LLSQV3=MC?z!W8f}e2Zw1K4WWKnUV$_xCaut^%fN|+OEts%w~vzes%oIf+mP_| z>A>)tM#)(7vzA9$8HM=y@8^MHnGw2W&s$7)Nk}cpN(W#(;rp>Vn)u0m`cxsWg>NSHi~9ATE{q29EN`-#mTvSecO95d3JhFXfeI!j zgzWRbp+|1y>}0oZP?W21@ohzUdHLX5bI1|LB8;uYb5>w;pN+#Gn_+>8XWIuprEo%p-%|Ain$&j!XHR2?UQ=u9 
zV^JCHt_+>y^22B})#;fgORx-zYq6?pzPc#q(a}-KVBG$gjA5=XB~vv*S8sL-!u=BK z3k1L|ZJ!&X%U?F@n>V54FD)}lVg;vi=l(%sh4*!klmwqL75#hVx!E$KRILLD^=aHK*_iCPjiS3O-JZ7q z!$xZhn~zG*&+FZ0{_OWUA6KMJ7fELfNatjHKl-Gy^zC8t!l_%AL20&E#pTA`LvzPc zLA7x;?G;eSYquW_1KT!>)0U$9id2xN9+c@lY@$EkJK!@@QDmf z>dvw2w>01hbngc+=lXfA-!pQ6HTmV*#QJq})9et(21RF+(JR-7>2kUq(U}>C#arxZ zj1=$nY?g|D^X-b7Y_Ka-iOfQ%3ZUvGuzhNl$H=8}VKu|wuarC6diQVA|EOS7y%8|J z_`Xi(^0{azd@HQQWlkE(rgv3~`T>s@Ts9BIxfpraerE6sM77QVh5N)9+v9gW;X4o& zz0>GP{NTAImYc@+iT+Z;Q11u7a_d>vW_r zu)43x`apR>qg2evTDsPXLUq6Pp>8m%^||3v7I(AeC%Ooy3}HT0b#55s#u_26uh=d0 z41{Z<#s{W*3X?@Wj1c2uE+02QK@akm{giL>3rY@ffN`h0w<)(%HE*NdNAu>C_?l4R zZzErml(2)J=kCbcM@#D_A2LerLtiR?7Coou!2s;-z>oNdRlRk1pz&5RR`{pQg?c5JLU7aWk6Zh_o{Ax!29wqS6ZqrpY zim_>3$7)wkq=c8BGVEIVya~>#_qx7@y*6;KRcjmpSqeg|cdkr)d#8VQo1SZrf@}Y$ zDIZ`ZB7DCp7VRC{UW{Y?gu-z$KceNW#p?pN$y=akwcEEPKgmbJS#9X0QMHQ8lby1H zL|J+HttOsUcBpWWcnL#beoP5)vFbdHuFT98^*aeQD9Z4XvL7J!3z+`PA-!KC&9f|u z4Rb2jd{n538or^XJ(ea8_f9i)3CU}ghP>k`Z{KPIwxbmQ$Qn*Rv#<|!^e1N__vqKE z&)u+3XvpxfOooZ+^(3~_o>nI{SDF<-5$`D{BAz^`dDGwDFH%>0v-$oj{x`}@zmvgd zC;$VPn)KN;0$BO)6pNJk++qN-D!WbE?w!2T%==ik{dVM!zC%%HMThpZsU?Vte{=}3 z6z{`M-zOZB(L1D(Wp?GNuO7lNA>Yc~K|CIZ0_DluCA+tku zSSC5bcWse1N6=i+(h1#S-&12hk-7j}PtX;+LBGE>>HO9SkT!pS4N(BBS|N`P7;e{k z<1P6Mm;iR?pQ`MCN~hgmrAbFMnCc#q@mMpoIzlr(q`#$e`sO3EFTA_S@86$*>cph; z;T@nf0`9h)4yeu*n}VZb@2rNW%8cCEoN zjv)Yj>`qHsdwW`P@)@CBa>k(iH8C)FGdt_{cxAB0w0<|!xhpws+ zqh-yd58AHU{g;)Ybf;F0+D;}9kR##6Wnb4*-LDPQ?)_82%G%O0``WRcg^NnH&%-E4-6_L>u28S_ zc26bs4)O>!U@aenBoip9lnlPm)xKMJ5J=1GUqQjH22B*?l?2IceK2}%vBH^Z@C@=+ zf=eXZOV9HDd6U=e6C$InoD219UaVv1SHl%Y#tg5>q`xmBrxZT#@#E@Qf`C^7K!%RU zYeT@VLA+oPY-@*S1y`*3)$xPHDdbibWT!p5%Q6|Vw*VH*9otw}96JW!BpsOhl7<>% za(I?^{eS12)3g0FcTwynv6N@amn`9(-nz@>DPi#?-0zPw%1=4{xC?M|?1|f#(gZIn z9g;IIybMAB9bh1xm~M~xhg$^P_Re!U=f9M4MdRP>!6H#qk?DvDO7dtT$s?dcQt>Sb(foD30s0m=j$BI0utIct^2 z;usl{#9OFvqjXGTdylN|GY#vBQw@5E^a zh<8D{Fw4KACou6Fq#RXKQ!}p90Ad13tZm{2X3Txx5Rlt4A-XTOMbcudBU@)ViHB1g zgj{`vi@Q-u^5jXxGDs3|dcSQY&=+GBY#sobNTTJd?B~eCdV z9xh>dXk_f725i_tizRLJ?uxK1si&SkGR6S}kjhqx0LV1?B%Fhrp9b=ZT!kF^@1?Tx zmd5iE_#N-}23U6$I@Nfq<+zTFIP#PhW6O!*to<}zn`}D7QK`C(h_vL8mZ`6^U1q&W*lrloPt|RD(AZ7%JRAviR#4K)S(Sc;uzb$lnEH=w%LdW$@jBXcrkL zl41bElz@eY8fwMre1LeQjau+ji$ioU-Xf=wS-2xCJj34y_|}3!senL6K$%+S;ZWdb z6Hj;(LIn9{M#B$~SJkM*m5+Tdc&@!H1$&TB1hswu`{o?Tmm#f9q;Kzt{uvO(JWWL~ zkc!xA3&3Wr9v@dZ5QVndtYyNi!0Qw(07Z2KRGLB)@8LO+COP#QgnNa=H9)iML;;@! zvoE)iKFm>oML-7Th4}Co)vW~U3V)ahoDV+<5`lTqVe3l%vjIe%Y_PS|H)7{>kk*u( zM*4{Q9pI#eIPK53{kAYAtO+&h#7b&S7k=xi??U1-7ldVAiQ|k0*BiNG7XR$T%dRQl zGl#BwdEy#A!3GUl`Wa_wwA+c_xxm{Ff1)EtA*vQl`@l>zeCM~=d{6Y9+;bS_1$n^f zm1`!D^H<=NdT6{%6=E{pv&XvxP?t5ydwQq?v~KX#R3|=s)4nkWxVABfW)}AyCuy#Z z>Pz}94J%jf`6ClxaOsT}b4WfnB?gc(Y*&F-uj8Ur!j6i1t*9CqSh_O9CCGp28!@zZ zYBXpt?h*ae*YOt(f^O7P>FB|j=+qv|-$y$gV)^J2KB<0PLJ`f!F0%v+pD)nWpX z_8kwHzkVA0UfqRXl<*#WE^>ECb&Uxuc@QjJXhs+;slELN!R0rZnt8zuX4@5pMil6@tZ}@KPL$+f##ql4Y7^%A96j*N>Co+Tn z6c!XUBfkVcPQY`@nt@N~th>qUu1f(kk;Gt!Jmns*gBuM|sX-{1Rl4Z~^h0k~g`rET z5?xVV7)$q|AL0J92O&>kj{kf|e(mc2$p9q>dtGMPNr+C&+~iY1(7QOt7Cv8Gf~O@iPD#X?J#sJtHfh{9KT ze{T*bdi%_}-^VVsr2WHz-8%@=6_;<$&Ln@HQE`AjMRXtsv9+&sB+w4tD~j^u5o}{- zjWp$k&x(?>XHk?x%Iq|v3Hh>XH=2Rm!9~GG4Q+`jJ1Gx<)bgBEl7%A1@KZu9iicaJ zl`s|7Vt0_Auan6tVJ6}-&D(bs{WYGENfxAGAyI$#uI3Hq83k?2 zK6y*o(N?U$rW}k(IcP(e>te2XbYlmeDu+hvQ*a%u)6$~3H1xXH+@iC!Zp~b@sswsN z$q8ztR*@h_jk$PbcT5YDBd7#(NI%Xq^b=pJ5O%%6a%CcpsgAp;W$=IA5zu0Sk&1VD zpdMqtItF#UNKB@fyWkN4;hftSFMcWE&K0YC!refxC!V|iPrZ2yuH(`6*Mr^iUC>?) 
ziC45YO!Eif-ywSkIs++!-SpaAdaL@>$0>?T6{C)QoQ7!kzN|#hOW!PLSSBZGMU&uf z){2ptv6vqE*s!d^cUcyb>F#*f!J}R53n_{Jy!BZ6yTrY981CFcPfXYD`T&%@!@`3(2PapnNEB~fcTS1AR@yDXlJ`i9DXYd`_1wc zk7k~3_#7SWe?ZUs($dsgLcZT@?(Fvq>F?9#{jg&#_A85mX{uUD~Ce!WV%l@60 zb3D_AMVe_AJ*!OX5Rsa@|tKyA!F@2(fpQ1bcRm}+o#X_pmy#>jmRd_xfonfk?lG{}54|=MdyCvM%UkwQw+YhuEd104 z_=;Kpc%#Gbl{Co zpgJ*%T>IiJ(qCn0hh187X1F}b?%%K@A*xuBR)_VnS0$1< zG*SwE+t26T2&IH@F0N7|e#}OnsC-qj8%oc)zS!OUr?k-0#e199F=U}o zhxLj0Xt}6em%eW*&~5yYQrmJJ>rkndlhAIjna-IuT=qBgG40-@Xz0+!XHDrkB^Kdb z*tP?9p=a2@eyX?1W@b|k?#Q4)Gp~;KdBb!5WPtSKgDpU~zbC*eb9Q*YNur+S0iUyY z%G!CA`ZxQ#Re3mfvqWRP`=Qr%$J|AUnY@+j&Yl;B-RFQAICL$HziO#V{py2C4g?#3 z!&Cu1x_Gi4fxEmQ9#FXp_HfOg!#ZV18CuH6D9`P7_Mh`!D5^MLVM+|ZW0d9!d;*x; ziKRJoUZYbkN;9D2Nj;al%5z^-!^2S$0Xb6bQ>Qe8fX5WYbw!9Tp%&H_5r~W>hVmy? zoKGTt%euI5&=r_J`Qtt%2V;SYMwMfspB%Op;rG7t)bSrtZ=U+iT}-*$8$YKk6m_WW z#nHAYnG~z@y1t2j4^i|njNqAqww29o3tcJs9P+9}URA+*P{*m_Z9hl96nnbQ<7_y6 z=tiAQy$|g|N+$aU+EF@E#^V5ba;Px+1m%s&rqV;O2)N)bS+gBxZrxYLb8}FX_L38> zz~4@a>a4l}fNHGkgI_vHtkN#GqOOcpE42Xo?@_1W&wn%b|F;4;cTblu+4#5Sn_Giy z?d&1_DN-=8xMlk<6GEWaft^o|YG`Pb*?uvZB;xE3Q@ogOTOab5~2&8^31a4 zPwUC0&L0K89G##rJAhlNz-zjHN&*9b_*{xS{vtmNZX9>YsbgXpP)znK3)4VUiw4D0 zJE-r4_nbJhy39jsX=!y*{RV)cFe}ftJ`}&I35S$y&<&Ro1XsNPb@XXaE?>X?I#hUH zD&#L2{QSIuN@Wg9+!90-9M^a*}fH)i34!R+yGzAVFI%LBH z!nq|N{=GV#b+^0A#btnBJgyA}Bo={c1d4U5>dhpUdpJ#FI zM_B6y?|{Y+p#Lf3lAmpQQy=+31-t8oyR}#SGqIXX{(_$p1nHGbw_Mp&wMZCL0r~)f zN86+uV4@@g6V{3Ekh>XbMs@qR0X72Qo@o&Yh&-&&& zD*-x>Md3?;v@mVpUzGAl zQ!qB`Fa1}S(t$Pu+E{4-d+)roP5D#ydtkX|rR@C6A6jC3h4K8tupQU0Uq5!N*%9>BU_XoZh(o%Dj zBj&36&F?P5#yKZrgncA9W57t`Uy+{v;oC&ozOiAw)$nr}7RL*`#D0E3zSiA(a#yhn zLzfCc+ouDt$B-_y3{-ks6hZD3z&vyS28AACUr6$wZD;mK732V7`8&VbA^?x2QZ{BigcB$kd>E?l^VyA+%v?JrJohCWF zP7E2FNnL)`WhV9^<<1SKA|gK${na}zoRZg|Oz;6je_uRXKK-%!uN&UbRRa5UpmYxO zh=pxnO%EAb^cA}FRU4d**}IDj=wk;^MDMo6R6W>aqlqqC#3lAt-H(&epSz@9N3D9P7+Wx&ovXk&oW&Yx=ngpbh&e;9UTF2-k|Y zyX=0}92ni6X|8zz;z}o{V##5*BSa(F0GR8Xc-tLR){-xQgi^Is1j%6n6Ts6Eisq=f z^qvMXuD7M`v0v|%o1|E5Kn8uzs(r!R6(Qs3d*qGM^|j?W$Vqja`3zL16M(p}u|`)w z60T%goTB{;ala|8ZqRTEJf|_$uiV55EnSQX_Nk6YeH8M;$7g`J0ra!=HN-r4&v0SZ z>ubuVQh)jQ_;Sm^8_FpwqruB5@p4*jR@PdxFJGkXA*1N%0%iwHC}T_84tD3n_>Ktd*WIi-doykCM?-`|ov1 zCo10T%(Wq9dw@`GV^Q){Z^zeT#{V0#72ue(v@`*o6cC~ncRceZ2R#*z>ImEZ_phOQ z_z{|XX$!Hud1x$A(n z24^yy0MAH2zwkFNBcFsvXIf-bt1&;2(Q8os|TljJSU<+wE;1wdhw5?rboSC7-2w<1$d#>7cP!MqVdMJ71JDRPDk-mnBU4nab zi^{+j2#q^et`Cm>fWPz%y> za}BmjUD-lJ0%JsVTY)28WElC0%exFtM*-bKeVzLTT3ZI%77=WDtKXxkcrH?CsI9H- zWkP~jwHzhdC#;CMzQ$VuY%h89_QSz!3LwoL`l>^0n7|L23C<|#%U`pgV7eh0SK(1m z{d3%IRv;M&3+z@>5Om5)#hyKTet_@&5}ar;ZwcVlwaVl033X&}O;!60pqdHPKYoqt z)30D6DcL>@3Ye@bKXvmk&`t!phKgLtvADT7o%g^iky~kP#8~hi0&L_jeDQfida+qcJ!1!Y+|qZVV6VE@RLRg_q<#kw(BgiBIKIvk0Ak<_Jle0G zAAJcrx@{Z0J0L@uTCf}Zhx*YPu;{g)WFjpbj}*uigj*|9n8S51xm$AKo5ZS9cIuz* z`}o`JcQ@?tu7+Z4b0o5;yM04K3?wiIwoJSemuiV}#^|^|arE|H^cg5Y-oS;#cNyWc zKwClUTegErxKhlEAJnljYbz@&Y-6bVXxpKt8K{Ll+Z=)Lxu^PTK;9gW9XzvDxcIHC z=zHXu#^z>AWuY-p%xL%CS?hAj7H2h2WWZ<3a-=-20rO91wBWGC9_4_w6%n>K6PRp5 z=k^Uuk1-C9xsI{rQNI^2xF>+id-Q6EP#ckz$&8c-UZOf zt_Mg;;SvrqkmSJo)%${YysWIm{Wl?$H3Q>}1%ln{3JxO4$pps+L08}r0BtanjN+1s z!me3Reg)?Z+-ZqV2M#P`Q*{X_(I7wdUWFT{@7hD>xj^2E&yNabls5Syrz-9t;atT{{ zmAbwXE)EPp2GjFEBR>atlJOFUVu_Vrm`ywdI!6cC#qqDaNMPyOM7IO~YiP8Qywi^l zo423!TA_eYBYAHf7-%y9`ewg&o>T`avX81YY&02;_;&NBX!eu*NJ|$e5>C|oS# zxNTh;Fq!X52!8{Hr+M?g>NmdAN!rDC;`jglO3mAU6PJAdfoVTS@juA)*S`9fCHOzs z`PYE_e*pJgi`+T{>;kD!Z!mOZNX-~zDp&#?J^lSRAu6f_^!mF4v3v}4i**1tkqp_q z0N5M1>+uBgoDGw>GALCt{=U(TW`Z6CL{$vvP}pAhi~8WZYTrg;0CG_gI9N4SXTSLX 
zcs0AW%ptfJVuvn47F1wddZ%r$>q)Kl)I21!PdR}eP8&_o14D00nFzE+bpTsM)`JHR4213AKP<$QJ;twDx>Rj~%$*;Ak8HHlHe6UX z3-k=pUmi93;5ORk5)TA_`CzYi2_CU6NEQNcAlB!_++WI-&l&=KkU&7(1ZJDNkVYm;9e{(wo6Ye>-L>HWT78TG#r3e@B|)CCMyU@aG&FyOeu3! zF3j%VSQnfgRfc@X9fOV!RH&-iV;4^X1g|N8C2ddSE1(m_&OrxJ0cY4YZ6%m@`o+D5 zk_qNlEx$(tG+TWNUF2O2L)WDIp-ylG;vCHz%T1{k#g0EJF$yY7ta4dkyrBP?Fu zoS*Geh9qFcahQdvK2oha=zp`pJ_KzO=Rtdn@%q}TPU|oaYMfda2g=LbOryLb*!84C z9aY5epAzDxf0!lG#SpCcdUM|^^NVBuhq||nin?pVM}15LMFassN|7!lrIk=ZYG4>T zrMpA1C;>qc7#bOx85p`tK}3d*p%DS;5Tpb|&;G&izR!Es`Eu6z@;|edAJmEc+jZaj zy07cnd{|GAF*;IiJ+25Bi>`tAYS$J4@|yuEBLeJ8EIWE34*2KiZu;mqbAE;|=Ct8m zNQksE56(=a4#j|ZMmlNKq{wvi1kdR+b#H2rP8sl2KwQMTonx+xYb$_$b3p*B3J!Y# z_Sn4rfr|P}DZjmdgLX=9XoK?ryz8K%Nl10yje|CyMTjlYa(wtCR~AgFzM|hg-3OS5 z+@;Mx=gNONsH1ucTP&x?FHie4vwz9jrYkDr_jYS|qI$h{g7iJwXEpBIjalzdXQ^Rj`C1HPNTX0Z!7hULbcX0Eg7?34B^W3h`kTP!J+MzhQiD z02JZS>qEI%G$`K00P{u4H!V?i_1~1Lfa4{!GsnclK>me?uRIMDP*^~$JPTkEs#oh> z1kUEQj>)lrlB56~)s1{+C@mg63LyOc2WnbM9CW+~O4bKDzrVg1^V=D(0PlxFE}#gd z0*!S#*qjW2D}mBef`0tD5N9+n+&heDNI-+l*DbjZC@%r)B~V%*v|4Z$TA2P`2q#1~c9kQ~`hX5k6y&Ve|d9 z+w}~~g{_nU;=5KM!he*10{uH)l34k>YXn}0p?A^Ia3TIQsDTnE;;Dj+R>r% zlyIPb?%=^L0=SHj77wyYTCLxAgh;@ECcW{D=hWE?3jcm50?6)3>BXRP%m20{S`=$|LOq^Y|OtKi3KnVu+Hm;HYbQo zWJ)z~j=}pb7_D#BGpJyOUXUccdrx?HXo7)oD`#V4vk2;-zaE1G=6XgVI8OO0D=I1u z5dfJ~Rc;Ng-Fgk67_F6mp$~XB0Ni;0+ino1txaW_OfTtM3fbXc`|eqkz;*W@^Tyw} z*Z^DfC@q~+ggxE={#k|Hn5*MGQ+R`+*uG=#4-<|82Uq69Hhfq`{fm*V?v%nL1l(Jz zJpA;NnMCbJ!KjGTETdl>*AvRXL{GeBDiin3)Gch(fRJiDF~!JMLA*Na~*n(%IDu>eMD;x;HCZ&6iX%7G>xq-Y5sB^e4Pkc>~5_<^oT z_L+^}>v{9E^eZ5O{g!(M*naLnYrO>OUfOL)9?G!h|LIA9H}PsU;*Fu2pJmLQ`BZr+XZ@~t)2&r z3qY%?0a0l05F*nz63{kDP>qG?a0B`hSz`bNY$}AD7sY_59Uipff2KF~ITt&-j??-l zQUG}jKrvt6u}WV0*T+l!zz6hy-pZEj{toy*h5pq8<|zD-(CLsT$@E~K^QeV|_4%YT zXD?h${pf#g2}oz5h}yJ_0XUb&8wN?4vZRUZ>}<9j$dnnp-WKCg1}ZAyK<9u3+s-0X zp$1HqbRgt|0S#Re;F$7sB#CMQQ(L8*={$@+U;!4dlH*AhrgZ)&Kkg zv^7{*SwW*Y0O@KVYQQ-<=et+UJEJ7LX#gsU$Q zVxV%rUoL+8P>{8v(qG_Mi~+KKbh1J93bNaBKBKTkIC$os0G! 
z?1Kfy(jAE+YQPUbC+AgI%e1KsRO~Tz>TMYc>f95K`%}cO03WMpdYF6a!Lky(B9Xb`fT$E(fi}o9w5m~ zZnV!!`91kSdQXFBMX%gG{yz_Fr^SviGQxtTGY@i`i{_uf5$Y*$3J7y=rbNs`jG-UU z24e?Gwx;$~;LHVNdTi2%jB^l?K;(cxb)ZD41}axll|*5l(@wx9nrLzicqGbR1z->p zz|n*JzIsoGuL0}E!0U{XWIEG|JJ0a!U7S5FYy46G&4Jk2Au}6{Np}c2Q`3-Cm)vv z@^sSya;F0fxG%jYtmL)8`yhLK&?%5F1)803+x&i?z}L4L zH0oc<-rcXJmk(?7oit^$)wAODl03Kd>pHqOTj>S z>0ZO%edz;0S?vxN`I-ZC_d=yu$XeR;x9AVT^O^XKMkj#97A&l-Hh&8+^==7Z29Gzr z$`u%nDy+GY5JpWl=J^Q2`~O;#^Z&7sSp@KHI2>*R)_DRLDXD`^O=$auv`T_@1Kdz5 zgU91rB{LHeXrZ_X5{FCspg05HnCn9L_*9XH$N}#RDeO+NutNfH$RLlX0)7`D1!LJE zbYB_-`k%7`C5DDWXaeBzJ)nF;Z3LZ+?D}&wz)oT@1WZVR->!l&%;p7R;fo9sI*`d0 zC>0$jf#Q@lI8_gI{kK+RdS@1}VJr^7>AxSK$PE8QFkDdvpoj&)XmkOvwy1{AT#$ko z;?4k9>U`6tAe+tk9uS>kW;>HHqmDHyfLs7cPsz9Irb4-(6ojnY50tVdy|Hmzyu1T| zNOKOV&_e_Qh_gNq$hE!TG?fZW&J+L&OteoJL=9{3+aN}I%%g&X7AvDv#T0>ovssR< zIH>!S0*@2*;d1MqG%_5(e__y>6*5!VcF7!r9PG*JMWZ+V00I-LY%9OwYcF7QV!-@# z8m|YKgVxYCI8p^^>m2nc6EQgSgId8wxJ#Yy77VE4AfFQ8bHiGzrlxi&#tE{Gi{7~7 zc4!YV3;AaN#xGX@@LVwOt)eVHDj8|X%LfAAbTWvN1d`cautqO$dn96Zwr-0w^I#de zI?ekssh%NkT{`;Ja6A}BH;D3?pOX@83TguoI|5`y1%T4S0fZ`AZ9rgU0~9T!ACsYZ z@7#$>Y)RR;;|gfz-g6#ct3Vw3N&NGkYYy;*8tZ`$=;D&dXSi32dLvIk?hH6G6VDM= z2q;orm@)91y}SDC@6Iz1VA8P!j*9)DDwYp9`j(bdCv?U*kj@bk6GulzM*hyY64new z46?--Br+&e_?)d{*p{~kNA5vwr$AwKnxp2SJ_;0!Ol3m)r#R~XgdtKz$Sw*k5+1S7 zaS8+RD%o;`_-gkdR$#seQvYly7k}&U*wrE>Rs}Gu%-sPdwg_rk#uXBv!np_Nt||X2 zIswp$GEpBc`DYa39kcrU`Ycj^nR7}a#2+(nz%v&rhN!|b4t7i?=xZCw&v|M;`;2$T zCTzqxA2xJKe8gwD8n}f+?9Z!z3F#;(L($;kTf*Gb2SC|}>d*%XXJR0F6FiFQ5%wdv`i4%|49~XepBmF30BMXF;vTMQs7ChwgxB7>RahBs4Y`u`>}{`HlPL#R3@iX5@gVaXi8KJ_>HgpAMzY>>t#qW)zt?WrEkV{R z;Jr)WY;xYb56T#T^+Gzp);?^ph7qri`&N-x_iR1_fzQRRx$TNS;0v8aMuOVo2-^!^}8VP6m)n(o# zq#skZ^Z0+fIVw`~E1u*FK6@$n$8%x1CbQl@v$ni7{HO7!!7h08X$ zsNUb}u-2ahWaorEDTciRQPN;kIebsAh5;xN76Fvfm4DCG3T%*TIjf4s+VfJj^Xtz z@8`j$9ii_Zo?VY_@+A+oakydJ##vJOX+!i%%OH<3NgKS>HL}z-h`dNJ-?8pJa(1)l zUHhtD!niVqoJT9F?>-v)R=ex`{?Uy5)^HhTnw%gf6A16kS-j`#>*FOU$)%#EhS95d zom{RJM%AeaU!cu@RneW3CgtmEl#-G{)7PggW-IsmYQBAs@p|4#n-v+Go=@K+kG@fz z5k)tUi;kp<`Q)yTU)bB9@^kO9qN#RLnyH>i=IARk>!E{xDi&g{D`1G9(A#-RMbaic zx6}O|-Hy)9SBlP zkoChpDWvTQ1#;RXx%;No-hZ`CM`sQ)DZ~%~$bm-TeKdb|vR3g!z3mV!Bb~=4FM&rl zcs<_(#y$7)1t=Dcn6_*wF@_m+0d#a$8v0rn*e@l{AcctO zGg{Y{%p4PjEpXbb)|7E6Hxc-W{a_%@T4b<9C#R^Z-hWtCRzR;Lg?Ku{kAFm!lPAx| zQBYSb(ATt`}uI;(d9k~*D@^n1JhL0If`hW%E0p| z1q>Z2H##~rB zZ1f3Ffx}OuZAiCu>rhO$R;dSkIBu{h< zpNHsOTzlj|dkiu4O_8JSSO$K7g>R793g!6<~(`vQW6oSrtTw^Dz)beDdKBp0a?c zSo{Ym3Ip~|GfGw52wXnN46QE5Tsa;dPh&73uLAhdp)v5HVyvIgW3CZ|e_VfB zSTvjoenk;DS{^kOye;*$cVIDEIaH58O3;*h2r=p!d_d$AtP$9PBeoKlevmO4$6m=0+XoEq4p?pN;urtFv^Jjj*TM+Gp;7< zrWP({&QW>a7G|_xt?p=;Np~y`^Sg|jY$%PO(9w?Bs`L}; zLK$S1THZ5krYk8tJT*KYzXGq-J2VcCWoyP=b}p;uN{v12S?y}v~(Vo zgv2muBRCN8hY+?KPz*8$8m zeG_-1Osj~WvcS%`0hU~G(>u{u!(0@mRT1GUF0+Xxd*(%dK1xG7%VGQ-y zBU56=s_8SV`p>xS(k~vzg~J|J#N?`LuiwaQ#=uj)%23jrY2_K!xL@919UPq!Pry_U zr;b>WcV@8?RiIMU&(}z_!UV{V_>L7m`pnTyVm3lCI8G0CgdsJ~qEx|gID!sW=E{y} z;2G#EIywk0OF@s)zJh60sw#esLyEbQAr&$lO7#X-$0Hv6 z-ZKr33g!_6{pYe}7~(cH#_u;8m(h#=Zz?Av+Mmm>NO1>0x#oZdK8<$m_qQx_4bKLls&L*D5MTT6hb>qa&H& zRLae5@0O0%rRSvp*PN)7{6+1$(%P{OXx4{Md_gI9`GWi4Ipk(>q`$KdHu9oSvxh+&9odD|q8-JLrYA43DiVK? 
zx81X0hPUX&#g`ROdORb{$SKh88*q4kpsP?!E9KE9RwFDUy8Prn^YH?T&QmJb1!JjB zFcS8>26lj5knQ2!GSR)&A%vrlrwjP!s>Vl`Zs*#^XjLZ=V6{Sh5O_LcB6%$42H{^Q zdEat|MIEi16P||9CyI$dbSZNp?@jQ5`IwF$VWZCtL4D2>yL^(PAw3!_BTB*!YXVGq zgOly*l&W@N@*4ksb2+BiHaCYK)fn2r4Zlq}_z2C93D*J@u8~Ux$!Y{(dWZ+~ZOtn@ z@A`Sf3u-^IcJ+C{(>|Y@P#fWKj@{s9OhLY+dibRy1H{6%8_`X| z$_0L*{C$}TmF|DpVyXe0`DbpcIGoR^-;psko00nf{Rh5bk*}UO;{OQGkP728ydG1a z$Z(V^n+(da2Rk66E^9UjFYI-G;5MxE%IaK*%`FirN-DDoQk{q8Q;EiNKKzd6Msk%c za%5U5HMeEFqikg+j8y^{tBziC*yvas>C!uFQ_TSH)A9aZA=y)-+87mWiHFs+U0)(c zRx}Vi)G}2w|MNM+J8@iJLUZAs5A)rGOCBln2vFc2O8Sw`jEF^J#?@=1kJZILp$U=6 zIm7=h#7LQl?2)U4Kvsh z1I)7xL<*kcIN<@#8aB>l2AC9>C&}^t(#9Mu+t(BNpJl=nElUBMz!12yXwJ}#MtN9m zwWnX2qX1zg05fG3|51d#JJMB8Nhk)kK%8qIex*Qh^=Rf7E`iVYMfNG-9JnlImEab= zi=r?U`0)mT1&%^04|0%8>@!}_X(vX&&Y0^z?WPCq*83@r4|IjMs(0q+5f)P%I8x^u z)#(9IO4e%44=JeB2bG96!TDMRnBbN|cmxi&C7S?9(hz7?4gNIM8v(MWDbVpuH*E6& z-4}FFh6v-3Q;i2yTmp^^-K92t(F(UvccawbqN6cywMC=TVT91&3&4X4z(EI|Wp0qx zP7rwYKy*>)?mY-uhVV4VF9=YG_Ql=;n}9CCC%@LnM(O|=%G3uyM~7Cd0J1ifcAFQr z>Ae|#^|lPe`G9IM;lRk0&I_QBP%+F^ZCUF~tCDC%0hmcy|NeadrC&zRsEW>9Irtgd zZ~^a%F!ISXD~fZhnn9{BAkoj=W)iu+(-p1&lJOq6$cY*Ws+57mcg{T~T?NiFv9U{l zW9>z>2VoS>c>v~|!+rTeWI6%dVeXb0Xo6=3{9!iVwSY?@)nrtM-ED#Zz$t_xfP$A3 z9{6LDHI{%b(1aX))Smu~0-jVnc)UC5DBag8+zQYe4ggrH0Ac_=KSfHkKRYwC0Gwk6 zpbPQp4_({P*KV0Shi*m*Ao~GGLfy~}i39oi!N_5)=fTPB1Z20RcS3`w)GJpce(ECe_< zHa3z$OfPHT)useozyW~NB+SSrF0K!B0PlguGY>!|wovUgZj_9%3I62I*F9x`L%UwG zr?#PisU}v3iXy(`8-k3KMT8JnX`y*=9s7Et(v@@xR2;FOS{>xs58ca?3Yo2=EbwEq z2>_6Un9)oQK#pko9&!#R>uhmmitPnR-r6vzt&p|v3S9dMDFU_!A>#WIl^W+HvABKR z&&2?3C3`mnl!o%Q-HAL#`1GDf})j3*bE=knyQsNF{IQkqLb|Ak% zRlj2F@1$*A4ga8Y1hVrzYH~UYJGvdK@qQ_mS*m9~w@pju{?e_(4zW&2aAyaX?-5D< z{K9@3|s)BMbbX*1T)m9wqg%5As?F6Mg%*29Ry75iM`OUuqH9aE`a@#6WhD3cu$>>k@;UC?$CApd&nUMdeQnowhwiUMoiDDYXi;-;61(;Cb~QyMIBvDfhuW z^un97XB-7SYa~R@51|3YA|D{-H8(vw0BW7upd4_){EO&AH~A!g5k!s;6&_g4+y%H` z5yUs~*_aJ%HvI}TIkHTkrul;mw|kXd%pKY@H-TGRKzHYhq=^E9Dt`N&fJh2+GJBT-2se)RK4T_tD*sU>nefv9l_ zRNl|WSf`kL6eu8Nl#<|v+{Kq_JFbsFSAU+YfsA2ZOG4(QOTYo02<`+V+j2xxbfu!H z;-Ki+I7c@EIC+1;`~#w2NZ%Pn^gZE~9JirR{4V;-544~dhcP~V+Zs)QH~pRYOfgPKi+#~hcXK$@SH2!@osJ;SGNf>l9P&Wf)j<@uQLha#dNETB{O8QS^U5r8-%u@6?2$y{# zznIND-YT)0II1Y}*}T4+ve80A!3=eKkjBPI-)HdvF3ICWD z)_o&As3SG-FEam=?52iUbZLfyX7*F`U-~k=;0lg`Zz=J%gVh$UOWRueUnu!vv+95c zV=U=jbwB#CtA29cyXM8;yG@ICHqOVCjmRqR8>R(H)}zm0ls!hT3Alf}F6lmrlI$<@ za6D#Fh{pySy4@RwYz)=y{Ps47=YOA3;l1a22khY$;k+qtuTy@B_ZJE_g^IG~?LIbx z*m2(|nT$KPHtHR1fY-6z(4YX;QH3d(o+~9KzZh9Z>6J;hf;k)p6Y9PvZPkU!${ALe zTKOL%h>8O21)jXn!J8FFyRV=VND1B{{0QrW;UFB^Vo!TDy0H$EzgCt??)Bg=de56%bQ-U z;FC!@i#|SuB(MQ|*CwR&A$-PD-9zK{Tjega|&&5&C1?P;B3TkX^3xK-ja+oCn9Z}=RAsOPhRAkTl}Uj;zlr~5p5 zwm-#kZq;CH&Ej$F#=13mN(_f4lqVpIOxY3*`nLX&W!}(i3(h{2!5PA?VGZ!BZu|ZW z3ozf{9CL&jl`47zd}8tz_7=u%#V&}SbAWSv1%sz-%_j``IncSG@6Z9rSk}_QJJ>*Z z7oHr$SvF$?l0{1+YOE;5n_HG8enpG64PKk2WMhHJgrR2+KYMpWmOWUZnF6NkA@jZ< zbg5$(?`vvVmXfHEX=N9CxewjAUqP)6GN3fp%iC(}c^JISu--ds%saUVt@`_Vk|I?{ zN8!|6a2|MMOWxc2t}-QFJM-Byy(aO>n8ho`KEHg;0 zpf_#g05jEks^9g#h)9oOATG*)qrz8poUmjq!3iXi9sO{=iABiQS1;x>AGW8sH$ZPr zVZT&)@E(1iP>yCbX|Cb6Z4|clp^b)%!4KZN9_IJj3o*m+gc4zFg1RtupG2^0$P;jK zDGeeVe7eCLHVt-tJ?k?4RaYl}a1L(Q`?E5XEAs_?uTX|nP<=m_ESqnxpLE9dR>oJ4 zg0Dup%zo8r_y6juQ|KA8@fB!zto$?AEDj?a@e_mq1aQj{VW^+d{YzfR*TJz&N?$j< z_QJyR2ph3XwV;Tll*^po&z7nU)|V;&^;{6kaPuWvP+dtx$=XeLIYnm0l(jhH$F6Nm z>#4uP#tV_o{LI9qjqvH{?CafNS=_25?2r&woA=I1GESjK)zh`I8bLCA-W^oLJ zAgEXz(^gbK4aW1;Vr6ON9qgQ?Hm2|fN^&$~pw3)rJR8S+d(Df~CA};g zF3|V!q^EX0)1;cb(urFpD@>!jZLm8G<16 z^&`9c;X()_qN6OIaE>O4a8!{V(-WWjWM@o)Q+<g`;^atpdD6`T0vtb54mz08_&wB=ccLJ$uXb&HUXPq^7_n>WHXivu@Qs%XT^m3S4} 
z)GZxfRC_L_wz+=oe`@0<_*g#IXwN%@wcRMUm-ut+QMRlCB4v39o41a7QgQ~^;f$G}UpF0=~?L3`HnX7ydHrjedEV|=?r?#73aL4y@T0hN}Sj=rbKKfyg1`EzW1 z0}nR98m;B5I>Ku1_T6O<&ezt6>Tbuobnv~#SRmaW4Z6=*u!`Jg5kM>s8RIzB@53WW z?G3X?cG?h*+SOUS9KCTiT$|t|O~ypnUjS}6#JL_I=wV?Hrc7@7GC0D=W^sa7a)eV9 zl;s++NV`!4&L$tZju&IHE8-Qc%^vw~wV1!qwzZh8TvnWMWA|ucJWmF2mp)61f_3jt zS{^Qy96#)-s%jcUdcrqTNt*=_e=};!mG}&EBVyb93hcO=r_YDKuTx@X;~}2A za5%u-S3s)KgPtNb9cvfzppbCqcx5xOy?`=v%w8yrjULtJG(;r{3E&INU)0wcQ*YV4 zXS6h>KN#j$Nomlwh!Hxjr7uf>WURCo9U!GoGZ+X<(g+o`y^_PcR}L=tsFu=)vDn9{ z0y2rboR)X`YAarkA6C_FgDrLCJTIunRF4xtmu^oVIuu7;W7g8SJebyq zzGY;fqaADKqb(9Mv!h;Ht6(AUC%w51VeO+4I8yOtpyzQp!pO4PlACMb+Izmf7Lq~M zH%BCWGD(mR;&8>4mihS@e!$jQaWjuB!|v(#MyV}-%7pv6_9~6V5dF>kfCp%H9sZHTiFUn;%iE4MvVS!_*o*1NLrEh%s2Np zf?5Q`ExgKlmzSSxb6g{xtfF7BUZ#=Zym&!AYRJ2gdTD1TRpg6hp4`!E^P>s-gK1V2 z^1^EOojBM(!v^nZq*}gju~_x?uIa#X1Ux=6u9<7Pkrk}2(DK@eCl6iY(&*|F>wYzU z@uE({K9SM*V7OPp-sK3k$s+x3ell&;SXPst!s0czp>sEmUJ3RWW~|3v557vh{G1SFj;Kd%_<(%7a#eu zIXVcRx6thVP|*i>nbRP{Ddzsy4-A!Q z@Y#@8M=w?qSy9?ponE4uj9s*rn7)D8%cgg_qJf|2<%uZU<$I?Bc*f4`tCMi|ylB^> zVQ&m@Lz6fUkCFFNRXJyf1XxWtw0w;?F{NmJf`riLX*<_sMa8Z!xa5nxlbKVeMaj|b zdR{E?n^nB#;c?>~l}OsEZJic+G~zDB@<%G6_weuKlfT6nv4-krvpZf7ZhM(GePk%z z4i?ZSBRqax!&O|*`PMr`=k{P%Uu8`e+2s6;Q#TpBbw)r`;qgcY&!s=TByWo=zio;% zUcQr8aL4mjur3S3SZ(P^Hzi4hdFgLqHR45{PSoUiJP!!t74;lNm#i*!ad}A%CH?Mx zk!(}m%iII2s8U}tcEga2<}<+ou78@C1w`XiXwiPWW6xF$v}VP7`mZ(aos#s%6!Ddu zA+)Y%KNv+vkKn&}+KfYRS94D=+$=UIUQ;4Eiq`exl!d}jZQ-Zi&Ov&UuRN^=7-q_K zogMQp4f1fyRt>(O=kfRU)#vi{zYTvJK^T$wgGTj;2(AQ$bemg&FBJ5)t|J!taau@z z{mX8#%!|1_a}Lh;#UdFc;&dNJ_aoc9^jw;zT)8nE>Zc@Y$A_}-Kcc_Td|w;OOc)O_ z2he*i7n@%sOLEA)yBn`B8>oNZFnI(;mwMfz*eu8S`c#D39p-WN%^$6Kj|*7vyOr*$ zw`ZegQCwurvQ9!M6!UX+!N}p4$L-rRV}H;hbVbb^;Y8f?h*rplmUD}r$C~z~eAs0# z&L?0I2 z)GEOb+$DDLGrDicX06Qc%g9RNon+TKz_s>Ak$#hK zU^LJZEZNq9$zriX;dWHfrY>-)8|ya*mKPfL4kQ(BZc#r!eq2HDxaNy*GF(2rM